code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __UpperCAmelCase = False @skip_mps class __lowercase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): snake_case_ = StableDiffusionAttendAndExcitePipeline snake_case_ = False snake_case_ = TEXT_TO_IMAGE_PARAMS snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} ) snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __lowercase ( cls : str ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowercase ( cls : Any ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowercase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=A ,) UpperCAmelCase__ : List[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=A ,set_alpha_to_one=A ,) torch.manual_seed(0 ) UpperCAmelCase__ : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 
,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,) torch.manual_seed(0 ) UpperCAmelCase__ : List[str] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="""gelu""" ,projection_dim=512 ,) UpperCAmelCase__ : int = CLIPTextModel(A ) UpperCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase__ : int = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowercase ( self : Union[str, Any] ,A : str ,A : str=0 ): '''simple docstring''' if str(A ).startswith("""mps""" ): UpperCAmelCase__ : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase__ : Union[str, Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase__ : List[Any] = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = """cpu""" UpperCAmelCase__ : Tuple = self.get_dummy_components() UpperCAmelCase__ : List[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase__ : List[str] = self.get_dummy_inputs(A ) UpperCAmelCase__ : Union[str, Any] = pipe(**A ).images UpperCAmelCase__ : int = image[0, -3:, -3:, -1] self.assertEqual(image.shape ,(1, 64, 64, 3) ) UpperCAmelCase__ : str = np.array( [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 
0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] ) UpperCAmelCase__ : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A ,1e-3 ) def __lowercase ( self : Dict ): '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowercase ( self : List[str] ): '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=7e-4 ) def __lowercase ( self : Optional[int] ): '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def __lowercase ( self : List[str] ): '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def __lowercase ( self : Tuple ): '''simple docstring''' super().test_save_load_local(expected_max_difference=5e-4 ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class __lowercase ( unittest.TestCase ): @classmethod def __lowercase ( cls : Any ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowercase ( cls : Optional[int] ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowercase ( self : str ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = torch.manual_seed(51 ) UpperCAmelCase__ : Dict = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" ,safety_checker=A ,torch_dtype=torch.floataa ) pipe.to("""cuda""" ) UpperCAmelCase__ : Optional[Any] = """a painting of an elephant 
with glasses""" UpperCAmelCase__ : Any = [5, 7] UpperCAmelCase__ : str = pipe( prompt=A ,token_indices=A ,guidance_scale=7.5 ,generator=A ,num_inference_steps=5 ,max_iter_to_alter=5 ,output_type="""numpy""" ,).images[0] UpperCAmelCase__ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
65
"""simple docstring""" import requests def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Tuple = {"""Content-Type""": """application/json"""} UpperCAmelCase__ : Optional[Any] = requests.post(__UpperCamelCase , json={"""text""": message_body} , headers=__UpperCamelCase ) if response.status_code != 200: UpperCAmelCase__ : Any = ( """Request to slack returned an error """ F"{response.status_code}, the response is:\n{response.text}" ) raise ValueError(__UpperCamelCase ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return int((input_a, input_a).count(0 ) != 0 ) def lowerCAmelCase ( ): '''simple docstring''' assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
65
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = CTRLTokenizer snake_case_ = False snake_case_ = False def __lowercase ( self : List[str] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : List[Any] = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""] UpperCAmelCase__ : Optional[int] = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""] UpperCAmelCase__ : int = {"""unk_token""": """<unk>"""} UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def __lowercase ( self : int ,**A : Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : List[Any] ,A : Any ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = """adapt react readapt apt""" UpperCAmelCase__ : Any = """adapt react readapt apt""" return input_text, output_text def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) UpperCAmelCase__ : Tuple = """adapt react readapt apt""" UpperCAmelCase__ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split() UpperCAmelCase__ : Dict = 
tokenizer.tokenize(A ) self.assertListEqual(A ,A ) UpperCAmelCase__ : Any = tokens + [tokenizer.unk_token] UpperCAmelCase__ : Dict = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,A )
65
1
"""simple docstring""" import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase="shi-labs/oneformer_demo" ): '''simple docstring''' with open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) , """r""" ) as f: UpperCAmelCase__ : Union[str, Any] = json.load(__UpperCamelCase ) UpperCAmelCase__ : Tuple = {} UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : str = [] for key, info in class_info.items(): UpperCAmelCase__ : Optional[int] = info["""name"""] class_names.append(info["""name"""] ) if info["isthing"]: thing_ids.append(int(__UpperCamelCase ) ) UpperCAmelCase__ : Any = thing_ids UpperCAmelCase__ : str = class_names return metadata class __lowercase ( unittest.TestCase ): def __init__( self : List[Any] ,A : Optional[int] ,A : Optional[Any]=7 ,A : Any=3 ,A : Tuple=30 ,A : int=400 ,A : Tuple=None ,A : Tuple=True ,A : Tuple=True ,A : Optional[int]=[0.5, 0.5, 0.5] ,A : List[Any]=[0.5, 0.5, 0.5] ,A : Dict=10 ,A : int=False ,A : Optional[int]=255 ,A : Any="shi-labs/oneformer_demo" ,A : Tuple="ade20k_panoptic.json" ,A : Dict=10 ,): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Any = num_channels UpperCAmelCase__ : List[str] = min_resolution UpperCAmelCase__ : List[str] = max_resolution 
UpperCAmelCase__ : Optional[Any] = do_resize UpperCAmelCase__ : int = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : int = image_mean UpperCAmelCase__ : Tuple = image_std UpperCAmelCase__ : Optional[Any] = class_info_file UpperCAmelCase__ : List[str] = prepare_metadata(A ,A ) UpperCAmelCase__ : Tuple = num_text UpperCAmelCase__ : Dict = repo_path # for the post_process_functions UpperCAmelCase__ : List[str] = 2 UpperCAmelCase__ : Any = 10 UpperCAmelCase__ : List[Any] = 10 UpperCAmelCase__ : Optional[int] = 3 UpperCAmelCase__ : Optional[int] = 4 UpperCAmelCase__ : str = num_labels UpperCAmelCase__ : List[str] = do_reduce_labels UpperCAmelCase__ : int = ignore_index def __lowercase ( self : str ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def __lowercase ( self : Dict ,A : List[str] ,A : Dict=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : Tuple = image_inputs[0] if isinstance(A ,Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Any = int(self.size["""shortest_edge"""] * h / w ) UpperCAmelCase__ : int = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase__ : Any = self.size["""shortest_edge"""] UpperCAmelCase__ : Any = int(self.size["""shortest_edge"""] * w / h ) else: UpperCAmelCase__ : Optional[Any] = self.size["""shortest_edge"""] UpperCAmelCase__ : str = self.size["""shortest_edge"""] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , 
UpperCAmelCase__ : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : str = max(A ,key=lambda A : item[0] )[0] UpperCAmelCase__ : List[Any] = max(A ,key=lambda A : item[1] )[1] return expected_height, expected_width def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) ,masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) ,) @require_torch @require_vision class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string snake_case_ = image_processing_class def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = OneFormerImageProcessorTester(self ) @property def __lowercase ( self : Tuple ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""image_mean""" ) ) self.assertTrue(hasattr(A ,"""image_std""" ) ) self.assertTrue(hasattr(A ,"""do_normalize""" ) ) self.assertTrue(hasattr(A ,"""do_resize""" ) ) self.assertTrue(hasattr(A ,"""size""" ) ) self.assertTrue(hasattr(A ,"""ignore_index""" ) ) self.assertTrue(hasattr(A ,"""class_info_file""" ) ) self.assertTrue(hasattr(A ,"""num_text""" ) ) self.assertTrue(hasattr(A ,"""repo_path""" ) ) self.assertTrue(hasattr(A ,"""metadata""" ) ) self.assertTrue(hasattr(A ,"""do_reduce_labels""" ) ) def __lowercase ( self : Dict ): '''simple docstring''' pass def __lowercase ( self : List[str] ): '''simple docstring''' # Initialize image_processor 
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : List[str] = prepare_image_inputs(self.image_processing_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase__ : Tuple = image_processor(image_inputs[0] ,["""semantic"""] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processing_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processing_tester.get_expected_values(A ,batched=A ) UpperCAmelCase__ : Tuple = image_processor( A ,["""semantic"""] * len(A ) ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) ,) def __lowercase ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input UpperCAmelCase__ : List[str] = image_processor(image_inputs[0] ,["""semantic"""] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.image_processing_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(A ,batched=A ) UpperCAmelCase__ : Tuple = image_processor( A 
,["""semantic"""] * len(A ) ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) ,) def __lowercase ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processing_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input UpperCAmelCase__ : str = image_processor(image_inputs[0] ,["""semantic"""] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(A ,batched=A ) UpperCAmelCase__ : Tuple = image_processor( A ,["""semantic"""] * len(A ) ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) ,) def __lowercase ( self : List[str] ,A : Optional[Any]=False ,A : List[Any]=False ,A : str="np" ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # prepare image and target UpperCAmelCase__ : Any = self.image_processing_tester.num_labels UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : int = None UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester ,equal_resolution=A ) if with_segmentation_maps: UpperCAmelCase__ : Optional[int] = num_labels if is_instance_map: 
UpperCAmelCase__ : int = list(range(A ) ) * 2 UpperCAmelCase__ : Dict = dict(enumerate(A ) ) UpperCAmelCase__ : Union[str, Any] = [ np.random.randint(0 ,high * 2 ,(img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": UpperCAmelCase__ : Optional[Any] = [Image.fromarray(A ) for annotation in annotations] UpperCAmelCase__ : Optional[Any] = image_processor( A ,["""semantic"""] * len(A ) ,A ,return_tensors="""pt""" ,instance_id_to_semantic_id=A ,pad_and_return_pixel_mask=A ,) return inputs def __lowercase ( self : Tuple ): '''simple docstring''' pass def __lowercase ( self : str ): '''simple docstring''' def common(A : str=False ,A : Tuple=None ): UpperCAmelCase__ : Tuple = self.comm_get_image_processor_inputs( with_segmentation_maps=A ,is_instance_map=A ,segmentation_type=A ) UpperCAmelCase__ : int = inputs["""mask_labels"""] UpperCAmelCase__ : Optional[int] = inputs["""class_labels"""] UpperCAmelCase__ : str = inputs["""pixel_values"""] UpperCAmelCase__ : int = inputs["""text_inputs"""] # check the batch_size for mask_label, class_label, text_input in zip(A ,A ,A ): self.assertEqual(mask_label.shape[0] ,class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] ,pixel_values.shape[2:] ) self.assertEqual(len(A ) ,self.image_processing_tester.num_text ) common() common(is_instance_map=A ) common(is_instance_map=A ,segmentation_type="""pil""" ) common(is_instance_map=A ,segmentation_type="""pil""" ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = np.zeros((20, 50) ) UpperCAmelCase__ : List[Any] = 1 UpperCAmelCase__ : List[Any] = 1 UpperCAmelCase__ : Optional[int] = 1 UpperCAmelCase__ : List[Any] = binary_mask_to_rle(A ) self.assertEqual(len(A ) ,4 ) self.assertEqual(rle[0] ,21 ) self.assertEqual(rle[1] ,45 ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class( 
num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file="""ade20k_panoptic.json""" ,num_text=self.image_processing_tester.num_text ,repo_path="""shi-labs/oneformer_demo""" ,) UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase__ : str = fature_extractor.post_process_semantic_segmentation(A ) self.assertEqual(len(A ) ,self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape ,( self.image_processing_tester.height, self.image_processing_tester.width, ) ,) UpperCAmelCase__ : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )] UpperCAmelCase__ : Any = fature_extractor.post_process_semantic_segmentation(A ,target_sizes=A ) self.assertEqual(segmentation[0].shape ,target_sizes[0] ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file="""ade20k_panoptic.json""" ,num_text=self.image_processing_tester.num_text ,repo_path="""shi-labs/oneformer_demo""" ,) UpperCAmelCase__ : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase__ : Any = image_processor.post_process_instance_segmentation(A ,threshold=0 ) self.assertTrue(len(A ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) ,A ) self.assertEqual( el["""segmentation"""].shape ,(self.image_processing_tester.height, self.image_processing_tester.width) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file="""ade20k_panoptic.json""" 
,num_text=self.image_processing_tester.num_text ,repo_path="""shi-labs/oneformer_demo""" ,) UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase__ : str = image_processor.post_process_panoptic_segmentation(A ,threshold=0 ) self.assertTrue(len(A ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) ,A ) self.assertEqual( el["""segmentation"""].shape ,(self.image_processing_tester.height, self.image_processing_tester.width) )
65
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { 'configuration_bridgetower': [ 'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig', ], 'processing_bridgetower': ['BridgeTowerProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST', 'BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
65
1
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __UpperCAmelCase = logging.get_logger(__name__) def lowerCAmelCase ( __UpperCamelCase=None , __UpperCamelCase=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=__UpperCamelCase ) @dataclass class __lowercase : snake_case_ = list_field( default=[] , metadata={ """help""": ( """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version""" """ of all available models""" ) } , ) snake_case_ = list_field( default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} ) snake_case_ = list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} ) snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Use FP16 to accelerate inference."""} ) snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Benchmark training of model"""} ) snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Verbose memory tracing"""} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Whether to perform speed measurements. 
Speed measurements can be disabled via --no-speed."""} , ) snake_case_ = field( default=__lowerCamelCase , metadata={ """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory""" } , ) snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Trace memory line by line"""} ) snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Save result to a CSV file"""} ) snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Save all print statements in a log file"""} ) snake_case_ = field(default=__lowerCamelCase , metadata={"""help""": """Whether to print environment information"""} ) snake_case_ = field( default=__lowerCamelCase , metadata={ """help""": ( """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use""" """ multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled""" """ for debugging / testing and on TPU.""" ) } , ) snake_case_ = field( default=F"inference_time_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , ) snake_case_ = field( default=F"inference_memory_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , ) snake_case_ = field( default=F"train_time_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , ) snake_case_ = field( default=F"train_memory_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , ) snake_case_ = field( default=F"env_info_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving environment information."""} , ) snake_case_ = field( default=F"log_{round(time() )}.csv" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , ) snake_case_ = field(default=3 , metadata={"""help""": 
"""Times an experiment will be run."""} ) snake_case_ = field( default=__lowerCamelCase , metadata={ """help""": ( """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain""" """ model weights.""" ) } , ) def __lowercase ( self : int ): '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" ,A ,) def __lowercase ( self : int ): '''simple docstring''' return json.dumps(dataclasses.asdict(self ) ,indent=2 ) @property def __lowercase ( self : List[str] ): '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
65
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """is_longer"""] def __init__( self : str ,A : Union[str, Any]=64 ,A : Tuple=48_000 ,A : Dict=480 ,A : List[str]=10 ,A : str=1_024 ,A : Any=0.0 ,A : Optional[int]=False ,A : float = 0 ,A : float = 14_000 ,A : int = None ,A : str = "fusion" ,A : str = "repeatpad" ,**A : List[Any] ,): '''simple docstring''' super().__init__( feature_size=A ,sampling_rate=A ,padding_value=A ,return_attention_mask=A ,**A ,) UpperCAmelCase__ : List[Any] = top_db UpperCAmelCase__ : Union[str, Any] = truncation UpperCAmelCase__ : Optional[int] = padding UpperCAmelCase__ : List[Any] = fft_window_size UpperCAmelCase__ : Optional[Any] = (fft_window_size >> 1) + 1 UpperCAmelCase__ : Any = hop_length UpperCAmelCase__ : List[str] = max_length_s UpperCAmelCase__ : List[Any] = max_length_s * sampling_rate UpperCAmelCase__ : List[Any] = sampling_rate UpperCAmelCase__ : Optional[int] = frequency_min UpperCAmelCase__ : Tuple = frequency_max UpperCAmelCase__ : List[str] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm=A ,mel_scale="""htk""" ,) UpperCAmelCase__ : str = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm="""slaney""" ,mel_scale="""slaney""" ,) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Tuple = self.__class__.__name__ if "mel_filters" in 
output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __lowercase ( self : List[str] ,A : np.array ,A : Optional[np.array] = None ): '''simple docstring''' UpperCAmelCase__ : Dict = spectrogram( A ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=A ,log_mel="""dB""" ,) return log_mel_spectrogram.T def __lowercase ( self : Optional[Any] ,A : Union[str, Any] ,A : int ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : List[str] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : int = [0] # randomly choose index for each part UpperCAmelCase__ : Tuple = np.random.choice(ranges[0] ) UpperCAmelCase__ : Tuple = np.random.choice(ranges[1] ) UpperCAmelCase__ : str = np.random.choice(ranges[2] ) UpperCAmelCase__ : List[str] = mel[idx_front : idx_front + chunk_frames, :] UpperCAmelCase__ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] UpperCAmelCase__ : Dict = mel[idx_back : idx_back + chunk_frames, :] UpperCAmelCase__ : Optional[Any] = torch.tensor(mel[None, None, :] ) UpperCAmelCase__ : int = torch.nn.functional.interpolate( A ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=A ) UpperCAmelCase__ : Dict = mel_shrink[0][0].numpy() UpperCAmelCase__ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def __lowercase ( self : Any ,A : np.array ,A : Optional[int] ,A : Any ,A : Tuple ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": UpperCAmelCase__ : int = True # random crop to max_length (for compatibility) -> this should be handled by self.pad 
UpperCAmelCase__ : str = len(A ) - max_length UpperCAmelCase__ : Optional[Any] = np.random.randint(0 ,overflow + 1 ) UpperCAmelCase__ : Optional[int] = waveform[idx : idx + max_length] UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": UpperCAmelCase__ : Tuple = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCAmelCase__ : int = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCAmelCase__ : List[Any] = np.stack([mel, mel, mel, mel] ,axis=0 ) UpperCAmelCase__ : Any = False else: UpperCAmelCase__ : Union[str, Any] = self._random_mel_fusion(A ,A ,A ) UpperCAmelCase__ : List[str] = True else: raise NotImplementedError(f"data_truncating {truncation} not implemented" ) else: UpperCAmelCase__ : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCAmelCase__ : str = int(max_length / len(A ) ) UpperCAmelCase__ : int = np.stack(np.tile(A ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": UpperCAmelCase__ : List[Any] = int(max_length / len(A ) ) UpperCAmelCase__ : str = np.stack(np.tile(A ,A ) ) UpperCAmelCase__ : Optional[Any] = np.pad(A ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": UpperCAmelCase__ : int = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : str = None ,A : Optional[str] = None ,A : Optional[int] = None ,A : Optional[int] = None ,A : Optional[Union[str, TensorType]] = None ,**A : List[str] ,): '''simple docstring''' UpperCAmelCase__ : Optional[int] = truncation if truncation is not None else self.truncation UpperCAmelCase__ : Dict = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[int] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : List[str] = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Any = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [np.asarray(A )] # convert to mel spectrogram, truncate and pad if needed. UpperCAmelCase__ : Tuple = [ self._get_input_mel(A ,max_length if max_length else self.nb_max_samples ,A ,A ) for waveform in raw_speech ] UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Tuple = [] for mel, longer in padded_inputs: input_mel.append(A ) is_longer.append(A ) if truncation == "fusion" and sum(A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCAmelCase__ : List[str] = np.random.randint(0 ,len(A ) ) UpperCAmelCase__ : int = True if isinstance(input_mel[0] ,A ): UpperCAmelCase__ : Tuple = [np.asarray(A ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool UpperCAmelCase__ : List[str] = [[longer] for longer in is_longer] UpperCAmelCase__ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} UpperCAmelCase__ : str = BatchFeature(A ) if return_tensors is not None: UpperCAmelCase__ : int = input_features.convert_to_tensors(A ) return input_features
65
1
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm __UpperCAmelCase = re.compile('[^A-Za-z_0-9]') # parameters used in DuplicationIndex __UpperCAmelCase = 10 __UpperCAmelCase = 256 def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if len(__UpperCamelCase ) < MIN_NUM_TOKENS: return None UpperCAmelCase__ : Union[str, Any] = MinHash(num_perm=__UpperCamelCase ) for token in set(__UpperCamelCase ): min_hash.update(token.encode() ) return min_hash def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return {t for t in NON_ALPHA.split(__UpperCamelCase ) if len(t.strip() ) > 0} class __lowercase : def __init__( self : Tuple ,*, A : float = 0.8_5 ,): '''simple docstring''' UpperCAmelCase__ : Any = duplication_jaccard_threshold UpperCAmelCase__ : Any = NUM_PERM UpperCAmelCase__ : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm ) UpperCAmelCase__ : Any = defaultdict(A ) def __lowercase ( self : Union[str, Any] ,A : Tuple ,A : MinHash ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self._index.query(A ) if code_key in self._index.keys: print(f"Duplicate key {code_key}" ) return self._index.insert(A ,A ) if len(A ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(A ) break else: self._duplicate_clusters[close_duplicates[0]].add(A ) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [] for base, duplicates in self._duplicate_clusters.items(): UpperCAmelCase__ : Dict = [base] + list(A ) # reformat the cluster to be a list of dict UpperCAmelCase__ : Tuple = [{"""base_index""": 
el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(A ) return duplicate_clusters def __lowercase ( self : Any ,A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.get_duplicate_clusters() with open(A ,"""w""" ) as f: json.dump(A ,A ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = element UpperCAmelCase__ : List[str] = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(__UpperCamelCase , max_queue_size=10000 ) , chunksize=100 , ): if data is not None: yield data def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = DuplicationIndex(duplication_jaccard_threshold=__UpperCamelCase ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__UpperCamelCase ) ) , max_queue_size=100 ) ): di.add(__UpperCamelCase , __UpperCamelCase ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = get_tokens(__UpperCamelCase ) UpperCAmelCase__ : List[Any] = get_tokens(__UpperCamelCase ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) __UpperCAmelCase = None def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : List[str] = [] for elementa in cluster: UpperCAmelCase__ : Tuple = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: UpperCAmelCase__ : Optional[int] = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(__UpperCamelCase , __UpperCamelCase ) >= jaccard_threshold: elementa["copies"] += 1 break else: UpperCAmelCase__ : Optional[Any] = 1 extremes.append(__UpperCamelCase ) return extremes def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' global _shared_dataset UpperCAmelCase__ : Union[str, Any] = dataset UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : Union[str, Any] = partial(_find_cluster_extremes_shared , jaccard_threshold=__UpperCamelCase ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( __UpperCamelCase , __UpperCamelCase , ) , total=len(__UpperCamelCase ) , ): extremes_list.append(__UpperCamelCase ) return extremes_list def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 0.85 ): '''simple docstring''' UpperCAmelCase__ : Any = make_duplicate_clusters(__UpperCamelCase , __UpperCamelCase ) UpperCAmelCase__ : Any = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Optional[Any] = find_extremes(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for extremes in extremes_clusters: for element in extremes: UpperCAmelCase__ : Tuple = element UpperCAmelCase__ : List[Any] = duplicate_indices - set(extreme_dict.keys() ) 
UpperCAmelCase__ : Union[str, Any] = dataset.filter(lambda __UpperCamelCase , __UpperCamelCase : idx not in remove_indices , with_indices=__UpperCamelCase ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: UpperCAmelCase__ : Union[str, Any] = element["""base_index"""] in extreme_dict if element["is_extreme"]: UpperCAmelCase__ : Optional[int] = extreme_dict[element["""base_index"""]]["""copies"""] print(F"Original dataset size: {len(__UpperCamelCase )}" ) print(F"Number of duplicate clusters: {len(__UpperCamelCase )}" ) print(F"Files in duplicate cluster: {len(__UpperCamelCase )}" ) print(F"Unique files in duplicate cluster: {len(__UpperCamelCase )}" ) print(F"Filtered dataset size: {len(__UpperCamelCase )}" ) return ds_filter, duplicate_clusters
65
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] ,A : Union[str, Any] ,A : Dict=7 ,A : Optional[int]=3 ,A : List[str]=18 ,A : Union[str, Any]=30 ,A : Tuple=400 ,A : Dict=True ,A : List[str]=None ,A : str=True ,A : Optional[Any]=False ,A : Optional[Any]=True ,A : List[str]=True ,A : Optional[int]=[0.5, 0.5, 0.5] ,A : List[str]=[0.5, 0.5, 0.5] ,): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : Union[str, Any] = image_size UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : Optional[int] = max_resolution UpperCAmelCase__ : str = do_resize UpperCAmelCase__ : Tuple = size if size is not None else {"""height""": 18, """width""": 20} UpperCAmelCase__ : List[str] = do_thumbnail UpperCAmelCase__ : Optional[int] = do_align_axis UpperCAmelCase__ : Union[str, Any] = do_pad UpperCAmelCase__ : Tuple = do_normalize UpperCAmelCase__ : Optional[Any] = image_mean UpperCAmelCase__ : List[Any] = image_std def __lowercase ( self : Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = DonutImageProcessor if is_vision_available() else None def __lowercase ( self : str 
): '''simple docstring''' UpperCAmelCase__ : Tuple = DonutImageProcessingTester(self ) @property def __lowercase ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""do_resize""" ) ) self.assertTrue(hasattr(A ,"""size""" ) ) self.assertTrue(hasattr(A ,"""do_thumbnail""" ) ) self.assertTrue(hasattr(A ,"""do_align_long_axis""" ) ) self.assertTrue(hasattr(A ,"""do_pad""" ) ) self.assertTrue(hasattr(A ,"""do_normalize""" ) ) self.assertTrue(hasattr(A ,"""image_mean""" ) ) self.assertTrue(hasattr(A ,"""image_std""" ) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 20} ) UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) ) self.assertEqual(image_processor.size ,{"""height""": 84, """width""": 42} ) def __lowercase ( self : Dict ): '''simple docstring''' pass @is_flaky() def __lowercase ( self : int ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase__ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( 
encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : Tuple = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) @is_flaky() def __lowercase ( self : List[str] ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : Optional[int] = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) @is_flaky() def __lowercase ( self : Any ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input UpperCAmelCase__ : List[Any] = 
image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : List[Any] = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,)
65
1
"""simple docstring""" import argparse import importlib from pathlib import Path # Test all the extensions added in the setup __UpperCAmelCase = [ 'kernels/rwkv/wkv_cuda.cu', 'kernels/rwkv/wkv_op.cpp', 'kernels/deformable_detr/ms_deform_attn.h', 'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh', 'models/graphormer/algos_graphormer.pyx', ] def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.') __UpperCAmelCase = parser.parse_args() if args.check_lib: __UpperCAmelCase = importlib.import_module('transformers') __UpperCAmelCase = Path(transformers_module.__file__).parent else: __UpperCAmelCase = Path.cwd() / 'build/lib/transformers' if not test_custom_files_are_present(transformers_path): raise ValueError('The built release does not contain the custom files. Fix this before going further!')
65
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class __lowercase ( __lowerCamelCase ): snake_case_ = """open-llama""" def __init__( self : Dict ,A : str=100_000 ,A : str=4_096 ,A : Optional[Any]=11_008 ,A : Tuple=32 ,A : str=32 ,A : Optional[int]="silu" ,A : List[Any]=2_048 ,A : str=0.0_2 ,A : Optional[int]=1e-6 ,A : int=True ,A : Tuple=0 ,A : str=1 ,A : Any=2 ,A : Optional[Any]=False ,A : int=True ,A : Any=0.1 ,A : Optional[Any]=0.1 ,A : Optional[Any]=True ,A : Union[str, Any]=True ,A : Tuple=None ,**A : Optional[int] ,): '''simple docstring''' UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : Optional[int] = rms_norm_eps UpperCAmelCase__ : Any = use_cache UpperCAmelCase__ : Optional[Any] = kwargs.pop( """use_memorry_efficient_attention""" ,A ) UpperCAmelCase__ : Any = hidden_dropout_prob UpperCAmelCase__ : str = attention_dropout_prob UpperCAmelCase__ : Optional[int] = use_stable_embedding UpperCAmelCase__ : Tuple = shared_input_output_embedding UpperCAmelCase__ : Tuple = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,tie_word_embeddings=A ,**A ,) def __lowercase ( self : Optional[Any] ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,A ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f"got {self.rope_scaling}" ) 
UpperCAmelCase__ : List[Any] = self.rope_scaling.get("""type""" ,A ) UpperCAmelCase__ : int = self.rope_scaling.get("""factor""" ,A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(A ,A ) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' try: UpperCAmelCase__ : Union[str, Any] = float(__UpperCamelCase ) except ValueError: raise ValueError("""Please enter a valid number""" ) UpperCAmelCase__ : List[str] = decimal - int(__UpperCamelCase ) if fractional_part == 0: return int(__UpperCamelCase ), 1 else: UpperCAmelCase__ : Optional[Any] = len(str(__UpperCamelCase ).split(""".""" )[1] ) UpperCAmelCase__ : List[Any] = int(decimal * (10**number_of_frac_digits) ) UpperCAmelCase__ : Any = 10**number_of_frac_digits UpperCAmelCase__ , UpperCAmelCase__ : int = denominator, numerator while True: UpperCAmelCase__ : int = dividend % divisor if remainder == 0: break UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = divisor, remainder UpperCAmelCase__ , UpperCAmelCase__ : int = numerator / divisor, denominator / divisor return int(__UpperCamelCase ), int(__UpperCamelCase ) if __name__ == "__main__": print(F"{decimal_to_fraction(2) = }") print(F"{decimal_to_fraction(89.0) = }") print(F"{decimal_to_fraction('67') = }") print(F"{decimal_to_fraction('45.0') = }") print(F"{decimal_to_fraction(1.5) = }") print(F"{decimal_to_fraction('6.25') = }") print(F"{decimal_to_fraction('78td') = }")
65
"""simple docstring""" from collections.abc import Callable class __lowercase : def __init__( self : Tuple ,A : Callable | None = None ): '''simple docstring''' # Stores actual heap items. UpperCAmelCase__ : list = [] # Stores indexes of each item for supporting updates and deletion. UpperCAmelCase__ : dict = {} # Stores current size of heap. UpperCAmelCase__ : Any = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. UpperCAmelCase__ : int = key or (lambda A : x) def __lowercase ( self : Union[str, Any] ,A : int ): '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def __lowercase ( self : Tuple ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowercase ( self : Any ,A : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowercase ( self : List[Any] ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. 
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.arr[j], self.arr[i] def __lowercase ( self : Optional[int] ,A : int ,A : int ): '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def __lowercase ( self : Optional[int] ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._left(A ) UpperCAmelCase__ : Dict = self._right(A ) UpperCAmelCase__ : Optional[int] = i if left is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = left if right is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = right return valid_parent def __lowercase ( self : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._parent(A ) while parent is not None and not self._cmp(A ,A ): self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : int = parent, self._parent(A ) def __lowercase ( self : str ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = self._get_valid_parent(A ) while valid_parent != index: self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = valid_parent, self._get_valid_parent(A ) def __lowercase ( self : Optional[Any] ,A : int ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Tuple = self.pos_map[item] UpperCAmelCase__ : Dict = [item, self.key(A )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : List[Any] ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Any = self.pos_map[item] del self.pos_map[item] UpperCAmelCase__ : Dict = self.arr[self.size - 1] UpperCAmelCase__ : List[Any] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : str ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : Dict = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(A )] ) else: UpperCAmelCase__ : List[str] = [item, self.key(A )] UpperCAmelCase__ : Union[str, Any] = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowercase ( self : str ): '''simple docstring''' return self.arr[0] if self.size else None def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def lowerCAmelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
65
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class __lowercase ( __lowerCamelCase ): snake_case_ = """lxmert""" snake_case_ = {} def __init__( self : Optional[Any] ,A : Any=30_522 ,A : Dict=768 ,A : Optional[Any]=12 ,A : Optional[int]=9_500 ,A : List[Any]=1_600 ,A : int=400 ,A : List[Any]=3_072 ,A : List[Any]="gelu" ,A : str=0.1 ,A : Optional[Any]=0.1 ,A : Any=512 ,A : Tuple=2 ,A : Optional[int]=0.0_2 ,A : List[Any]=1e-12 ,A : List[Any]=9 ,A : Optional[int]=5 ,A : List[str]=5 ,A : List[Any]=2_048 ,A : Union[str, Any]=4 ,A : Tuple=6.6_7 ,A : Union[str, Any]=True ,A : Optional[Any]=True ,A : str=True ,A : Tuple=True ,A : Union[str, Any]=True ,A : int=True ,A : List[Any]=True ,**A : Tuple ,): '''simple docstring''' UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : List[Any] = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = max_position_embeddings UpperCAmelCase__ : List[str] = type_vocab_size UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : List[str] = layer_norm_eps UpperCAmelCase__ : Tuple = num_qa_labels UpperCAmelCase__ : Optional[Any] = num_object_labels UpperCAmelCase__ : Dict = num_attr_labels UpperCAmelCase__ : Optional[int] = l_layers UpperCAmelCase__ : Dict = x_layers UpperCAmelCase__ : int = r_layers UpperCAmelCase__ : Optional[int] = visual_feat_dim UpperCAmelCase__ : Optional[Any] = visual_pos_dim UpperCAmelCase__ : Any = visual_loss_normalizer UpperCAmelCase__ : Optional[Any] = task_matched UpperCAmelCase__ : str = task_mask_lm UpperCAmelCase__ : 
Union[str, Any] = task_obj_predict UpperCAmelCase__ : int = task_qa UpperCAmelCase__ : Union[str, Any] = visual_obj_loss UpperCAmelCase__ : Tuple = visual_attr_loss UpperCAmelCase__ : Tuple = visual_feat_loss UpperCAmelCase__ : int = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers} super().__init__(**A )
65
"""Sequence feature extractor producing MFSC (log-mel filter-bank) features
from raw speech, with optional per-utterance mean/variance normalization.

NOTE(review): identifiers in this file appear machine-mangled — every local
assignment targets the single name ``UpperCAmelCase__`` while later statements
read descriptive names (``self.sample_size``, ``features``, ...), and the
method signatures declare the duplicate parameter name ``A`` (a SyntaxError).
The structure matches the MCTC-style feature extractor; restore real names
against the upstream file before use. Inline notes below are hedged
accordingly.
"""
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging

__UpperCAmelCase = logging.get_logger(__name__)


class __lowercase ( __lowerCamelCase ):
    # Names of the tensors this extractor returns in its BatchFeature.
    snake_case_ = ["""input_features""", """attention_mask"""]

    def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,):
        """Store extraction hyper-parameters and derive FFT sizes.

        NOTE(review): parameters are presumably (feature_size, sampling_rate,
        padding_value, hop_length, win_length, win_function,
        frame_signal_scale, preemphasis_coeff, mel_floor, normalize_means,
        normalize_vars, return_attention_mask) — TODO confirm upstream.
        """
        super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A )
        UpperCAmelCase__ : str = feature_size
        UpperCAmelCase__ : int = sampling_rate
        UpperCAmelCase__ : int = padding_value
        UpperCAmelCase__ : Dict = hop_length
        UpperCAmelCase__ : int = win_length
        UpperCAmelCase__ : Dict = frame_signal_scale
        UpperCAmelCase__ : Dict = preemphasis_coeff
        UpperCAmelCase__ : str = mel_floor
        UpperCAmelCase__ : Any = normalize_means
        UpperCAmelCase__ : str = normalize_vars
        UpperCAmelCase__ : int = win_function
        UpperCAmelCase__ : List[Any] = return_attention_mask
        # Window/stride expressed in samples (win_length/hop_length are in ms).
        UpperCAmelCase__ : str = win_length * sampling_rate // 1_000
        UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000
        # n_fft is the smallest efficient FFT length covering the window;
        # n_freqs is the number of one-sided frequency bins.
        UpperCAmelCase__ : int = optimal_fft_length(self.sample_size )
        UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1

    def __lowercase ( self : Union[str, Any] ,A : np.array ):
        """Extract MFSC features for a single waveform.

        Returns the (frames, feature_size) log-mel spectrogram — the raw
        spectrogram is transposed on return.
        """
        # Hamming windows are built periodic; other window types use the
        # audio_utils default.
        if self.win_function == "hamming_window":
            UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A )
        else:
            UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function )
        UpperCAmelCase__ : Union[str, Any] = mel_filter_bank(
            num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
        UpperCAmelCase__ : Optional[Any] = spectrogram(
            one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
        return msfc_features.T

    def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ):
        """Normalize one feature matrix in place (mean and/or variance over
        the first ``input_length`` frames) and pad the tail with
        ``padding_value``; returns the array as float32."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            # Statistics are computed only over the non-padded prefix.
            UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 )
            UpperCAmelCase__ : Any = np.subtract(A ,A )
        if self.normalize_vars:
            UpperCAmelCase__ : str = x[:input_length].std(axis=0 )
            UpperCAmelCase__ : Optional[int] = np.divide(A ,A )
        if input_length < x.shape[0]:
            # Re-fill the padded frames, which normalization just perturbed.
            UpperCAmelCase__ : int = padding_value
        # make sure array is in float32
        UpperCAmelCase__ : str = x.astype(np.floataa )
        return x

    def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ):
        """Normalize a batch; true lengths come from the attention mask when
        provided, otherwise each array's full length is used."""
        UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )]

    def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,):
        """Featurize raw speech: validate sampling rate, batch/convert to
        float32, extract MFSC features per waveform, pad, optionally
        normalize, and optionally convert to the requested tensor type."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        # A 2-D ndarray is treated as an already-batched input; >2-D means
        # multi-channel audio, which is rejected.
        UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        UpperCAmelCase__ : Any = is_batched_numpy or (
            isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) )
        if is_batched:
            UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(A ,np.ndarray ):
            UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa )
        elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCAmelCase__ : Optional[Any] = [raw_speech]
        # extract fbank features
        UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech]
        # convert into correct format for padding
        UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} )
        UpperCAmelCase__ : Optional[Any] = self.pad(
            A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,)
        # make sure list is in array format
        UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] ,A ):
            UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features]
        UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            # The mask is only meaningful when real padding was applied;
            # otherwise normalization uses full lengths (mask=None).
            UpperCAmelCase__ : Union[str, Any] = (
                np.array(A ,dtype=np.intaa )
                if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD and padding
                else None )
            UpperCAmelCase__ : Any = self.normalize(
                padded_inputs["""input_features"""] ,attention_mask=A )
        if return_tensors is not None:
            UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A )
        return padded_inputs
65
1
"""Shared test mixin for diffusers UNet sub-blocks (down/mid/up).

Subclasses are expected to provide ``block_class`` and ``block_type``
("down" | "mid" | "up"); the mixin builds deterministic dummy inputs and
checks output shape/values and that training backprop runs.

NOTE(review): identifiers look machine-mangled — all methods are named
``__lowercase`` and all locals are assigned to ``UpperCAmelCase__`` while
later lines read descriptive names (``batch_size``, ``init_dict``, ...);
restore real names against the upstream diffusers test file before use.
"""
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class __lowercase :
    @property
    def __lowercase ( self : Union[str, Any] ):
        """Convenience property returning the default dummy input."""
        return self.get_dummy_input()

    @property
    def __lowercase ( self : Dict ):
        """Expected output shape per block type (batch, channels, H, W)."""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )

    def __lowercase ( self : List[str] ,A : List[str]=True ,A : Optional[int]=False ,A : Any=False ,A : int=False ,):
        """Build a seeded dummy-input dict; the flags opt into timestep
        embeddings, residual-state tuples, encoder hidden states, and a skip
        sample, respectively (presumably — TODO confirm parameter names)."""
        UpperCAmelCase__ : int = 4
        UpperCAmelCase__ : Tuple = 32
        UpperCAmelCase__ : Optional[Any] = (32, 32)
        # Fixed seed so every test sees identical tensors.
        UpperCAmelCase__ : Dict = torch.manual_seed(0 )
        UpperCAmelCase__ : int = torch.device(A )
        UpperCAmelCase__ : List[str] = (batch_size, num_channels) + sizes
        UpperCAmelCase__ : List[Any] = randn_tensor(A ,generator=A ,device=A )
        UpperCAmelCase__ : List[Any] = {"""hidden_states""": hidden_states}
        if include_temb:
            UpperCAmelCase__ : List[Any] = 128
            UpperCAmelCase__ : Any = randn_tensor((batch_size, temb_channels) ,generator=A ,device=A )
        if include_res_hidden_states_tuple:
            # Separate seed keeps the residual states distinct from the input.
            UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(1 )
            UpperCAmelCase__ : Tuple = (randn_tensor(A ,generator=A ,device=A ),)
        if include_encoder_hidden_states:
            UpperCAmelCase__ : Any = floats_tensor((batch_size, 32, 32) ).to(A )
        if include_skip_sample:
            UpperCAmelCase__ : Dict = randn_tensor(((batch_size, 3) + sizes) ,generator=A ,device=A )
        return dummy_input

    def __lowercase ( self : Dict ):
        """Return (init_kwargs, call_kwargs) for the block under test."""
        UpperCAmelCase__ : Optional[Any] = {
            """in_channels""": 32,
            """out_channels""": 32,
            """temb_channels""": 128,
        }
        # Up blocks also need prev_output_channel (=32); mid blocks take no
        # out_channels argument.
        if self.block_type == "up":
            UpperCAmelCase__ : Union[str, Any] = 32
        if self.block_type == "mid":
            init_dict.pop("""out_channels""" )
        UpperCAmelCase__ : Union[str, Any] = self.dummy_input
        return init_dict, inputs_dict

    def __lowercase ( self : List[Any] ,A : Optional[Any] ):
        """Run the block in eval mode and compare the output's shape and its
        last 3x3 corner slice against the expected values (atol=5e-3)."""
        UpperCAmelCase__ , UpperCAmelCase__ : int = self.prepare_init_args_and_inputs_for_common()
        UpperCAmelCase__ : List[Any] = self.block_class(**A )
        unet_block.to(A )
        unet_block.eval()
        with torch.no_grad():
            UpperCAmelCase__ : Tuple = unet_block(**A )
        # Some blocks return a tuple; compare only the primary tensor.
        if isinstance(A ,A ):
            UpperCAmelCase__ : Optional[Any] = output[0]
        self.assertEqual(output.shape ,self.output_shape )
        UpperCAmelCase__ : Optional[Any] = output[0, -1, -3:, -3:]
        UpperCAmelCase__ : Dict = torch.tensor(A ).to(A )
        assert torch_all_close(output_slice.flatten() ,A ,atol=5e-3 )

    @unittest.skipIf(torch_device == """mps""" ,"""Training is not supported in mps""" )
    def __lowercase ( self : Optional[int] ):
        """Smoke-test training: forward in train mode, MSE loss against a
        random target, and a backward pass must all succeed."""
        UpperCAmelCase__ , UpperCAmelCase__ : int = self.prepare_init_args_and_inputs_for_common()
        UpperCAmelCase__ : List[str] = self.block_class(**A )
        model.to(A )
        model.train()
        UpperCAmelCase__ : List[Any] = model(**A )
        if isinstance(A ,A ):
            UpperCAmelCase__ : Tuple = output[0]
        UpperCAmelCase__ : Any = torch.device(A )
        UpperCAmelCase__ : Dict = randn_tensor(output.shape ,device=A )
        UpperCAmelCase__ : str = torch.nn.functional.mse_loss(A ,A )
        loss.backward()
65
"""Sum of the decimal digits of n! (Project Euler problem 20 for n=100)."""
from math import factorial


def lowerCAmelCase(__UpperCamelCase=100):
    """Return the sum of the decimal digits of ``__UpperCamelCase``!.

    Args:
        __UpperCamelCase: non-negative integer whose factorial is examined
            (defaults to 100, the Project Euler input).

    Returns:
        int: sum of the digits of the factorial.

    Raises:
        ValueError: if ``__UpperCamelCase`` is negative (from ``factorial``).
    """
    # str(factorial(n)) yields the decimal digits; sum them as ints.
    return sum(int(digit) for digit in str(factorial(__UpperCamelCase)))


if __name__ == "__main__":
    # Fix: previously called the undefined name ``solution`` (NameError);
    # the defined entry point is ``lowerCAmelCase``.
    print(lowerCAmelCase(int(input('Enter the Number: ').strip())))
65
1
"""Evaluate a postfix (reverse Polish notation) expression, printing a
step-by-step table of stack operations."""
import operator as op


def lowerCAmelCase(__UpperCamelCase):
    """Evaluate a postfix expression given as a list of tokens.

    Fixes applied: the original body assigned every local to one mangled
    name (``UpperCAmelCase__``) while reading ``stack``/``post_fix``/``div``/
    ``opr``/``a``/``b`` — every use raised NameError. Locals are now bound
    coherently; the printed trace and the result are as intended.

    Args:
        __UpperCamelCase: list of tokens, e.g. ``["5", "6", "9", "*", "+"]``.
            Operands must be non-negative integers (``str.isdigit``); the
            operators ``^ * / + -`` are supported.

    Returns:
        int: the value of the expression.
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation (truncates toward zero)
    # operators & their respective operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(__UpperCamelCase)))
    for x in __UpperCamelCase:
        if x.isdigit():  # if x is an operand
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            # Pop right operand first, then left: for "a b OP" compute a OP b.
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    # Fix: previously read the tokens into one name but called the undefined
    # ``solve(Postfix)``; both names now refer to defined objects.
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', lowerCAmelCase(Postfix))
65
"""Tests for the Flax DistilBERT models: a config/input builder, a model-class
test suite, and a slow integration check against ``distilbert-base-uncased``.

NOTE(review): identifiers appear machine-mangled — classes/methods named
``__lowercase`` and locals assigned to ``UpperCAmelCase__`` while later lines
read descriptive names; restore real names against the upstream transformers
test file before use.
"""
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask

if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class __lowercase ( unittest.TestCase ):
    def __init__( self : Union[str, Any] ,A : Optional[int] ,A : int=13 ,A : Tuple=7 ,A : Dict=True ,A : Optional[int]=True ,A : Tuple=True ,A : str=True ,A : Any=99 ,A : Tuple=32 ,A : Dict=5 ,A : Optional[int]=4 ,A : Dict=37 ,A : Any="gelu" ,A : Any=0.1 ,A : Optional[int]=0.1 ,A : Union[str, Any]=512 ,A : Any=16 ,A : List[str]=2 ,A : List[Any]=0.0_2 ,A : Optional[int]=4 ,):
        """Store the small-model hyper-parameters used to build test configs
        and inputs (batch size, sequence length, hidden sizes, ...)."""
        UpperCAmelCase__ : Dict = parent
        UpperCAmelCase__ : Any = batch_size
        UpperCAmelCase__ : List[Any] = seq_length
        UpperCAmelCase__ : Optional[int] = is_training
        UpperCAmelCase__ : Optional[Any] = use_attention_mask
        UpperCAmelCase__ : int = use_token_type_ids
        UpperCAmelCase__ : int = use_labels
        UpperCAmelCase__ : Any = vocab_size
        UpperCAmelCase__ : Union[str, Any] = hidden_size
        UpperCAmelCase__ : int = num_hidden_layers
        UpperCAmelCase__ : int = num_attention_heads
        UpperCAmelCase__ : Dict = intermediate_size
        UpperCAmelCase__ : Any = hidden_act
        UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
        UpperCAmelCase__ : Any = attention_probs_dropout_prob
        UpperCAmelCase__ : str = max_position_embeddings
        UpperCAmelCase__ : List[Any] = type_vocab_size
        UpperCAmelCase__ : List[str] = type_sequence_label_size
        UpperCAmelCase__ : List[Any] = initializer_range
        UpperCAmelCase__ : List[Any] = num_choices

    def __lowercase ( self : Optional[Any] ):
        """Build (config, input_ids, attention_mask) with random test data."""
        UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase__ : List[str] = None
        if self.use_attention_mask:
            UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase__ : int = DistilBertConfig(
            vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=A ,)
        return config, input_ids, attention_mask

    def __lowercase ( self : List[Any] ):
        """Repackage the prepared config/inputs as (config, inputs_dict)."""
        UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs
        UpperCAmelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict


@require_flax
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
    # NOTE(review): FlaxDistilBertForQuestionAnswering appears twice in this
    # tuple — likely one entry was meant to be a different head; confirm
    # against the upstream file.
    snake_case_ = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def __lowercase ( self : Optional[int] ):
        """Attach the model tester used by the shared mixin tests."""
        UpperCAmelCase__ : List[str] = FlaxDistilBertModelTester(self )

    @slow
    def __lowercase ( self : Optional[Any] ):
        """Each model class must load from the hub and run a trivial forward."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" )
            UpperCAmelCase__ : List[Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(A )


@require_flax
class __lowercase ( unittest.TestCase ):
    @slow
    def __lowercase ( self : Union[str, Any] ):
        """Integration check: hidden states of ``distilbert-base-uncased`` on a
        fixed input must match stored reference values (atol=1e-4)."""
        UpperCAmelCase__ : Union[str, Any] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        UpperCAmelCase__ : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        UpperCAmelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        UpperCAmelCase__ : Dict = model(A ,attention_mask=A )[0]
        UpperCAmelCase__ : List[Any] = (1, 11, 768)
        self.assertEqual(output.shape ,A )
        # Reference slice of the last hidden state, recorded from a known-good
        # run.
        UpperCAmelCase__ : Any = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,A ,atol=1e-4 ) )
65
1
"""Lazy import scaffolding for the Mask2Former model package.

Fixes applied to the original:
- ``_LazyModule`` was called with an undefined name ``_import_structure``
  (NameError on import); the structure dict is now actually built under that
  name, and the optional vision/torch entries extend it instead of rebinding
  a throwaway variable.
- The lazy module is installed via ``sys.modules[__name__]`` (the original
  imported ``sys`` but never used it and bound the module to a local name,
  so the lazy loading never took effect).

NOTE(review): the eager TYPE_CHECKING imports use module names spelled
``*_maskaformer`` while the original structure key says
``configuration_mask2former`` — one of the two spellings is mangled; confirm
the real submodule file names and make keys match them.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps submodule name -> public names it provides; consumed by _LazyModule.
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision extras missing: the image processor is simply not exported.
    pass
else:
    _import_structure['image_processing_maskaformer'] = ['Mask2FormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: modeling classes are not exported.
    pass
else:
    _import_structure['modeling_maskaformer'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; runtime uses _LazyModule.
    from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_maskaformer import MaskaFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskaformer import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskaFormerForUniversalSegmentation,
            MaskaFormerModel,
            MaskaFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
65
"""simple docstring""" __UpperCAmelCase = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image', 'mask_image']) __UpperCAmelCase = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset(['input_tokens']) __UpperCAmelCase = frozenset(['input_tokens'])
65
1
"""Configuration and ONNX-export configuration for vision encoder-decoder
models (a composite config wrapping one encoder and one decoder sub-config).

NOTE(review): identifiers appear machine-mangled — classes/methods named
``__lowercase`` and locals assigned to ``UpperCAmelCase__`` while later lines
read descriptive names (``kwargs``, ``encoder_config``, ``output``, ...);
restore real names against the upstream transformers file before use.
"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig

if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

__UpperCAmelCase = logging.get_logger(__name__)


class __lowercase ( __lowerCamelCase ):
    # model_type identifier and composition flag for this composite config.
    snake_case_ = """vision-encoder-decoder"""
    snake_case_ = True

    def __init__( self : List[Any] ,**A : Union[str, Any] ):
        """Build the composite config from ``encoder`` and ``decoder`` kwargs.

        Raises ValueError unless both sub-config dicts are supplied; each is
        materialized via ``AutoConfig.for_model`` keyed on its ``model_type``.
        """
        super().__init__(**A )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
        UpperCAmelCase__ : int = kwargs.pop("""encoder""" )
        UpperCAmelCase__ : int = encoder_config.pop("""model_type""" )
        UpperCAmelCase__ : str = kwargs.pop("""decoder""" )
        UpperCAmelCase__ : Dict = decoder_config.pop("""model_type""" )
        UpperCAmelCase__ : List[Any] = AutoConfig.for_model(A ,**A )
        UpperCAmelCase__ : Any = AutoConfig.for_model(A ,**A )
        UpperCAmelCase__ : Union[str, Any] = True

    @classmethod
    def __lowercase ( cls : List[Any] ,A : PretrainedConfig ,A : PretrainedConfig ,**A : Tuple ):
        """Alternate constructor from two concrete configs; forces the decoder
        into cross-attention decoder mode before composing."""
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        UpperCAmelCase__ : Union[str, Any] = True
        UpperCAmelCase__ : List[Any] = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**A )

    def __lowercase ( self : Optional[int] ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        UpperCAmelCase__ : List[Any] = copy.deepcopy(self.__dict__ )
        UpperCAmelCase__ : Dict = self.encoder.to_dict()
        UpperCAmelCase__ : Any = self.decoder.to_dict()
        UpperCAmelCase__ : Dict = self.__class__.model_type
        return output


class __lowercase ( __lowerCamelCase ):
    # Minimum ONNX opset-related torch version for export.
    snake_case_ = version.parse("""1.11""" )

    @property
    def __lowercase ( self : Optional[int] ):
        """ONNX input spec: pixel_values with dynamic batch/channel/H/W axes."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def __lowercase ( self : List[Any] ):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def __lowercase ( self : List[Any] ):
        """ONNX output spec for the encoder's last hidden state."""
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )


class __lowercase ( __lowerCamelCase ):
    @property
    def __lowercase ( self : Any ):
        """ONNX input spec for the decoder half (ids, mask, encoder states)."""
        UpperCAmelCase__ : int = OrderedDict()
        UpperCAmelCase__ : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        UpperCAmelCase__ : Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        UpperCAmelCase__ : List[str] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs

    def __lowercase ( self : Dict ,A : "PreTrainedTokenizerBase" ,A : int = -1 ,A : int = -1 ,A : bool = False ,A : Optional["TensorType"] = None ,):
        """Build dummy decoder inputs: rename the tokenizer dummies to
        decoder_* and add zero encoder hidden states of the right shape."""
        # Local import keeps torch optional at module-import time.
        import torch

        UpperCAmelCase__ : int = OrderedDict()
        UpperCAmelCase__ : List[Any] = super().generate_dummy_inputs(
            A ,batch_size=A ,seq_length=A ,is_pair=A ,framework=A )
        UpperCAmelCase__ , UpperCAmelCase__ : int = dummy_input["""input_ids"""].shape
        UpperCAmelCase__ : int = (batch, encoder_sequence, self._config.encoder_hidden_size)
        UpperCAmelCase__ : Tuple = dummy_input.pop("""input_ids""" )
        UpperCAmelCase__ : Optional[int] = dummy_input.pop("""attention_mask""" )
        UpperCAmelCase__ : Dict = torch.zeros(A )
        return common_inputs


class __lowercase ( __lowerCamelCase ):
    @property
    def __lowercase ( self : str ):
        """Composite config exposes no direct inputs; parts are used instead."""
        pass

    def __lowercase ( self : Any ,A : PretrainedConfig ):
        """Return the ONNX config for the encoder half."""
        return VisionEncoderDecoderEncoderOnnxConfig(A )

    def __lowercase ( self : Dict ,A : PretrainedConfig ,A : PretrainedConfig ,A : str = "default" ):
        """Return the ONNX config for the decoder half, propagating the
        encoder's hidden size so dummy encoder states match."""
        UpperCAmelCase__ : List[str] = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(A ,A )
65
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __lowercase ( unittest.TestCase ): def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split() UpperCAmelCase__ : Tuple = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[Any] = { """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>""", } UpperCAmelCase__ : int = { """feature_size""": 1, """padding_value""": 0.0, """sampling_rate""": 16_000, """return_attention_mask""": False, """do_normalize""": True, } UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp() UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,A ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with 
open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) # load decoder from hub UpperCAmelCase__ : int = """hf-internal-testing/ngram-beam-search-decoder""" def __lowercase ( self : str ,**A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy() kwargs.update(A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : List[str] ,**A : Dict ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Any ,**A : List[Any] ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**A ) def __lowercase ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,A ) # decoder self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,) self.assertIsInstance(processor.decoder ,A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() 
,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha ,5.0 ) self.assertEqual(processor.language_model.beta ,3.0 ) self.assertEqual(processor.language_model.score_boundary ,-7.0 ) self.assertEqual(processor.language_model.unk_score_offset ,3 ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["""xx"""] ) with self.assertRaisesRegex(A ,"""include""" ): WavaVecaProcessorWithLM( tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : str = floats_list((3, 1_000) ) UpperCAmelCase__ : Optional[Any] = feature_extractor(A ,return_tensors="""np""" ) UpperCAmelCase__ : List[Any] = processor(A ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[int] = self.get_decoder() UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : List[Any] = """This is a test string""" UpperCAmelCase__ : int = 
processor(text=A ) UpperCAmelCase__ : Dict = tokenizer(A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def __lowercase ( self : Tuple ,A : List[Any]=(2, 10, 16) ,A : Dict=77 ): '''simple docstring''' np.random.seed(A ) return np.random.rand(*A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 ) UpperCAmelCase__ : Tuple = processor.decode(A ) UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(A )[0] self.assertEqual(decoded_decoder[0] ,decoded_processor.text ) self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text ) self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score ) @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] ) def __lowercase ( self : List[str] ,A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: UpperCAmelCase__ : List[str] = processor.batch_decode(A ) else: with get_context(A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(A ,A ) UpperCAmelCase__ : Optional[Any] = list(A ) with get_context("""fork""" ).Pool() as p: UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(A ,decoded_processor.text ) self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text ) self.assertListEqual(A ,decoded_processor.logit_score ) self.assertListEqual(A ,decoded_processor.lm_score ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Any = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : List[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,) UpperCAmelCase__ : List[str] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(A ) with get_context("""fork""" ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,) UpperCAmelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(A ,A ) self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,A ) self.assertTrue(np.array_equal(A 
,decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,A ,atol=1e-3 ) ) self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,A ,atol=1e-3 ) ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : Tuple = 2.0 UpperCAmelCase__ : str = 5.0 UpperCAmelCase__ : Union[str, Any] = -2_0.0 UpperCAmelCase__ : Optional[Any] = True UpperCAmelCase__ : str = processor.batch_decode( A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,) UpperCAmelCase__ : Any = decoded_processor_out.text UpperCAmelCase__ : Union[str, Any] = list(A ) decoder.reset_params( alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,) with get_context("""fork""" ).Pool() as pool: UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch( A ,A ,) UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(A ,A ) self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,A ) UpperCAmelCase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha ,2.0 ) self.assertEqual(lm_model.beta ,5.0 ) self.assertEqual(lm_model.unk_score_offset ,-2_0.0 ) self.assertEqual(lm_model.score_boundary ,A ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : str = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) 
).parent.parent.absolute() UpperCAmelCase__ : Optional[int] = os.listdir(A ) UpperCAmelCase__ : List[Any] = ["""alphabet.json""", """language_model"""] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(A ,A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(A ) UpperCAmelCase__ : Tuple = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() UpperCAmelCase__ : Tuple = os.listdir(A ) UpperCAmelCase__ : Dict = os.listdir(A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(A ,A ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Dict = floats_list((3, 1_000) ) UpperCAmelCase__ : List[str] = processor_wavaveca(A ,return_tensors="""np""" ) UpperCAmelCase__ : Dict = processor_auto(A ,return_tensors="""np""" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 ) UpperCAmelCase__ : List[str] = self._get_dummy_logits() UpperCAmelCase__ : Tuple = processor_wavaveca.batch_decode(A ) UpperCAmelCase__ : List[str] = processor_auto.batch_decode(A ) self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text ) def __lowercase ( self : List[str] ): 
'''simple docstring''' UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) self.assertListEqual( processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,) @staticmethod def __lowercase ( A : Optional[Any] ,A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets] return retrieved_list def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Dict = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(A ,output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : int = self._get_dummy_logits() UpperCAmelCase__ : Any = processor.batch_decode(A ,output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in 
outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) self.assertListEqual( [""" """.join(self.get_from_offsets(A ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] ) @slow @require_torch @require_torchaudio def __lowercase ( self : Tuple ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=A ) UpperCAmelCase__ : Tuple = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : Tuple = iter(A ) UpperCAmelCase__ : Optional[int] = next(A ) UpperCAmelCase__ : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) UpperCAmelCase__ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : Tuple = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(A ).logits.cpu().numpy() UpperCAmelCase__ : Any = processor.decode(logits[0] ,output_word_offsets=A ) UpperCAmelCase__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Union[str, Any] = [ { """start_time""": d["""start_offset"""] * time_offset, """end_time""": d["""end_offset"""] * time_offset, """word""": d["""word"""], } for d in output["""word_offsets"""] ] UpperCAmelCase__ : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON 
THE RIVER AT THE WALL""" # output words self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,A ) self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,output.text ) # output times UpperCAmelCase__ : str = torch.tensor(self.get_from_offsets(A ,"""start_time""" ) ) UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(A ,"""end_time""" ) ) # fmt: off UpperCAmelCase__ : Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) ) self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
65
1
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowercase ( __lowerCamelCase , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __lowercase ( unittest.TestCase ): @property def __lowercase ( self : Dict ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = ort.SessionOptions() UpperCAmelCase__ : int = False return options def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) UpperCAmelCase__ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) UpperCAmelCase__ : int = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" ,revision="""onnx""" ,safety_checker=A ,feature_extractor=A ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[int] = """A red cat sitting on a park bench""" UpperCAmelCase__ : str = np.random.RandomState(0 ) UpperCAmelCase__ : Dict = pipe( prompt=A ,image=A ,mask_image=A ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A ,output_type="""np""" ,) UpperCAmelCase__ : Any = output.images UpperCAmelCase__ : List[str] = images[0, 255:258, 
255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase__ : str = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) UpperCAmelCase__ : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) UpperCAmelCase__ : List[str] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-inpainting""" ,subfolder="""scheduler""" ,revision="""onnx""" ) UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" ,revision="""onnx""" ,scheduler=A ,safety_checker=A ,feature_extractor=A ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase__ : str = """A red cat sitting on a park bench""" UpperCAmelCase__ : Tuple = np.random.RandomState(0 ) UpperCAmelCase__ : Dict = pipe( prompt=A ,image=A ,mask_image=A ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A ,output_type="""np""" ,) UpperCAmelCase__ : Dict = output.images UpperCAmelCase__ : Optional[Any] = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) UpperCAmelCase__ : Tuple = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
65
"""simple docstring""" from sklearn.metrics import fa_score import datasets __UpperCAmelCase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' __UpperCAmelCase = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' __UpperCAmelCase = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def __lowercase ( self : List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,) def __lowercase ( self : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Optional[Any]=None ,A : List[str]=1 ,A : Optional[Any]="binary" ,A : Any=None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = fa_score( A ,A ,labels=A ,pos_label=A ,average=A ,sample_weight=A ) return {"f1": float(A ) if score.size == 1 else score}
65
1
"""simple docstring""" import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __UpperCAmelCase = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __UpperCAmelCase = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) __UpperCAmelCase = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions __UpperCAmelCase = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) 
__UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image) __UpperCAmelCase = np.expand_dims(test_image, axis=0) __UpperCAmelCase = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __UpperCAmelCase = 'Normal' if result[0][0] == 1: __UpperCAmelCase = 'Abnormality detected'
65
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model') __UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'} __UpperCAmelCase = '>>zh<<' __UpperCAmelCase = 'Helsinki-NLP/' if is_torch_available(): __UpperCAmelCase = 'pt' elif is_tf_available(): __UpperCAmelCase = 'tf' else: __UpperCAmelCase = 'jax' @require_sentencepiece class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = MarianTokenizer snake_case_ = False snake_case_ = True def __lowercase ( self : Optional[int] ): '''simple docstring''' super().setUp() UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase ( self : List[Any] ,**A : List[Any] ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Union[str, Any] ,A : Tuple ): '''simple docstring''' return ( 
"This is a test", "This is a test", ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = """</s>""" UpperCAmelCase__ : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""</s>""" ) self.assertEqual(vocab_keys[1] ,"""<unk>""" ) self.assertEqual(vocab_keys[-1] ,"""<pad>""" ) self.assertEqual(len(A ) ,9 ) def __lowercase ( self : Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,9 ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" ) UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A ) self.assertIsInstance(A ,A ) UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(A ,batch.input_ids[0] ) UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(A ) UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )] self.assertIn("""source.spm""" ,A ) MarianTokenizer.from_pretrained(A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] ,padding=A ,truncation=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch.input_ids.shape ,(2, 512) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) ) @slow def 
__lowercase ( self : Dict ): '''simple docstring''' # fmt: off UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) UpperCAmelCase__ : Any = """Tämä on testi""" UpperCAmelCase__ : int = """This is a test""" UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2] UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2] UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : int = 
tokenizer.decode(A ,skip_special_tokens=A ) self.assertEqual(A ,A )
65
1
"""simple docstring""" import itertools import math def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Dict = 2 while True: if is_prime(__UpperCamelCase ): yield num num += 1 def lowerCAmelCase ( __UpperCamelCase = 10001 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , __UpperCamelCase ) ) if __name__ == "__main__": print(F"{solution() = }")
65
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowercase ( metaclass=__lowerCamelCase ): snake_case_ = ["""onnx"""] def __init__( self : int ,*A : List[str] ,**A : int ): '''simple docstring''' requires_backends(self ,["""onnx"""] ) @classmethod def __lowercase ( cls : Optional[Any] ,*A : List[str] ,**A : Dict ): '''simple docstring''' requires_backends(cls ,["""onnx"""] ) @classmethod def __lowercase ( cls : List[Any] ,*A : Optional[int] ,**A : int ): '''simple docstring''' requires_backends(cls ,["""onnx"""] )
65
1
"""simple docstring""" __UpperCAmelCase = {str(digit): digit**5 for digit in range(10)} def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__UpperCamelCase ) ) def lowerCAmelCase ( ): '''simple docstring''' return sum( number for number in range(1000 , 1000000 ) if number == digits_fifth_powers_sum(__UpperCamelCase ) ) if __name__ == "__main__": print(solution())
65
"""simple docstring""" from argparse import ArgumentParser from .env import EnvironmentCommand def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" ) UpperCAmelCase__ : List[Any] = parser.add_subparsers(help="""diffusers-cli command helpers""" ) # Register commands EnvironmentCommand.register_subcommand(__UpperCamelCase ) # Let's go UpperCAmelCase__ : int = parser.parse_args() if not hasattr(__UpperCamelCase , """func""" ): parser.print_help() exit(1 ) # Run UpperCAmelCase__ : Union[str, Any] = args.func(__UpperCamelCase ) service.run() if __name__ == "__main__": main()
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("""String must only contain alphabetic characters.""" ) UpperCAmelCase__ : List[Any] = sorted(string.lower() ) return len(__UpperCamelCase ) == len(set(__UpperCamelCase ) ) if __name__ == "__main__": __UpperCAmelCase = input('Enter a string ').strip() __UpperCAmelCase = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
65
"""Flax Pegasus model tests: a config/inputs tester helper plus the unittest case class.

NOTE(review): identifiers in this file are machine-mangled (``__lowercase``,
``UpperCAmelCase__``, every parameter named ``A``); several defs therefore
cannot execute as written (duplicate argument names, unbound locals).
Code tokens are preserved verbatim; only comments/docstrings were added.
"""
import unittest

from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor

if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    __UpperCAmelCase = 'platform'
    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel


@require_flax
class __lowercase :
    # Tester helper: builds a tiny PegasusConfig and matching encoder/decoder inputs.
    snake_case_ = PegasusConfig
    snake_case_ = {}
    snake_case_ = """gelu"""

    def __init__( self : List[Any] ,A : int ,A : Optional[Any]=13 ,A : Dict=7 ,A : Dict=True ,A : Any=False ,A : Dict=99 ,A : int=32 ,A : Optional[int]=5 ,A : Union[str, Any]=4 ,A : Union[str, Any]=37 ,A : str=0.1 ,A : int=0.1 ,A : Optional[int]=20 ,A : Tuple=2 ,A : str=1 ,A : Optional[Any]=0 ,):
        '''Record the tester hyper-parameters on the instance.

        NOTE(review): all parameters are named ``A`` and the body reads names
        (``parent``, ``batch_size``, ...) that are never bound — mangled,
        preserved verbatim.
        '''
        UpperCAmelCase__ : Optional[Any] = parent
        UpperCAmelCase__ : Union[str, Any] = batch_size
        UpperCAmelCase__ : List[Any] = seq_length
        UpperCAmelCase__ : int = is_training
        UpperCAmelCase__ : Any = use_labels
        UpperCAmelCase__ : int = vocab_size
        UpperCAmelCase__ : Dict = hidden_size
        UpperCAmelCase__ : Optional[Any] = num_hidden_layers
        UpperCAmelCase__ : int = num_attention_heads
        UpperCAmelCase__ : Any = intermediate_size
        UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
        UpperCAmelCase__ : str = attention_probs_dropout_prob
        UpperCAmelCase__ : str = max_position_embeddings
        UpperCAmelCase__ : Union[str, Any] = eos_token_id
        UpperCAmelCase__ : Union[str, Any] = pad_token_id
        UpperCAmelCase__ : List[str] = bos_token_id

    def __lowercase ( self : Dict ):
        '''Build a small PegasusConfig and the matching inputs dict (EOS-terminated ids).'''
        UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size )
        UpperCAmelCase__ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 )
        UpperCAmelCase__ : Any = np.concatenate([input_ids, eos_tensor] ,axis=1 )
        UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase__ : str = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        UpperCAmelCase__ : Optional[Any] = prepare_pegasus_inputs_dict(A ,A ,A )
        return config, inputs_dict

    def __lowercase ( self : Any ,A : Optional[int] ,A : str ,A : Optional[int] ):
        '''Check cached (incremental) decoding matches a full decode on the last-step logits.'''
        UpperCAmelCase__ : Any = 20
        UpperCAmelCase__ : Dict = model_class_name(A )
        UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] )
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
        UpperCAmelCase__ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
        UpperCAmelCase__ : str = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
        # Decode all but the last token with the cache, then the last token alone.
        UpperCAmelCase__ : Optional[int] = model.decode(
            decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
        UpperCAmelCase__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
        UpperCAmelCase__ : int = model.decode(
            decoder_input_ids[:, -1:] ,A ,decoder_attention_mask=A ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=A ,)
        UpperCAmelCase__ : Dict = model.decode(A ,A )
        # Compare only the final position's first 5 logits between cached and uncached paths.
        UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )

    def __lowercase ( self : Optional[int] ,A : str ,A : Dict ,A : Union[str, Any] ):
        '''Same cache-consistency check as above, but with an explicit (padded) attention mask.'''
        UpperCAmelCase__ : Any = 20
        UpperCAmelCase__ : str = model_class_name(A )
        UpperCAmelCase__ : Any = model.encode(inputs_dict["""input_ids"""] )
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        # Pad the decoder attention mask with zeros out to max_decoder_length.
        UpperCAmelCase__ : Optional[int] = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] ,axis=-1 ,)
        UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
        UpperCAmelCase__ : List[str] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
        UpperCAmelCase__ : Union[str, Any] = model.decode(
            decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
        UpperCAmelCase__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
        UpperCAmelCase__ : Dict = model.decode(
            decoder_input_ids[:, -1:] ,A ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=A ,decoder_position_ids=A ,)
        UpperCAmelCase__ : Union[str, Any] = model.decode(A ,A ,decoder_attention_mask=A )
        UpperCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )


def lowerCAmelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=None ,):
    '''Build the model inputs dict, deriving attention masks from pad tokens when absent.

    NOTE(review): parameters are all named ``__UpperCamelCase`` (mangled); the body
    reads ``config``/``decoder_input_ids`` etc., which are never bound — preserved verbatim.
    '''
    if attention_mask is None:
        # Mask out pad positions in the encoder input.
        UpperCAmelCase__ : Union[str, Any] = np.not_equal(__UpperCamelCase , config.pad_token_id ).astype(np.inta )
    if decoder_attention_mask is None:
        # First decoder position is always attended; the rest mask pad tokens.
        UpperCAmelCase__ : Tuple = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
    # Test-case class for FlaxPegasusModel / FlaxPegasusForConditionalGeneration.
    snake_case_ = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    snake_case_ = True
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    def __lowercase ( self : List[str] ):
        '''Set up the model tester and config tester.'''
        UpperCAmelCase__ : int = FlaxPegasusModelTester(self )
        UpperCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=A )

    def __lowercase ( self : Tuple ):
        '''Run the shared configuration checks.'''
        self.config_tester.run_common_tests()

    def __lowercase ( self : List[str] ):
        '''Cache-consistency check for every model class.'''
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(A ,A ,A )

    def __lowercase ( self : List[str] ):
        '''Cache-consistency check with an explicit attention mask.'''
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(A ,A ,A )

    def __lowercase ( self : Any ):
        '''JIT vs. non-JIT encode must agree in output count and shapes.'''
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A )
                UpperCAmelCase__ : int = model_class(A )

                @jax.jit
                def encode_jitted(A : Optional[int] ,A : Union[str, Any]=None ,**A : Optional[Any] ):
                    return model.encode(input_ids=A ,attention_mask=A )

                with self.subTest("""JIT Enabled""" ):
                    UpperCAmelCase__ : int = encode_jitted(**A ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Dict = encode_jitted(**A ).to_tuple()
                self.assertEqual(len(A ) ,len(A ) )
                for jitted_output, output in zip(A ,A ):
                    self.assertEqual(jitted_output.shape ,output.shape )

    def __lowercase ( self : str ):
        '''JIT vs. non-JIT decode must agree in output count and shapes.'''
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : Dict = model_class(A )
                UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
                UpperCAmelCase__ : Dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(A : List[Any] ,A : Any ,A : List[Any] ):
                    return model.decode(
                        decoder_input_ids=A ,decoder_attention_mask=A ,encoder_outputs=A ,)

                with self.subTest("""JIT Enabled""" ):
                    UpperCAmelCase__ : Tuple = decode_jitted(**A ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : str = decode_jitted(**A ).to_tuple()
                self.assertEqual(len(A ) ,len(A ) )
                for jitted_output, output in zip(A ,A ):
                    self.assertEqual(jitted_output.shape ,output.shape )

    @slow
    def __lowercase ( self : List[Any] ):
        '''Smoke-test loading pretrained weights (converted from PyTorch) for each class.'''
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=A )
            UpperCAmelCase__ : Any = np.ones((1, 1) )
            UpperCAmelCase__ : Optional[Any] = model(A )
            self.assertIsNotNone(A )

    @slow
    def __lowercase ( self : Optional[int] ):
        '''End-to-end summarization: generate with beam search and compare decoded text.'''
        UpperCAmelCase__ : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
        UpperCAmelCase__ : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
        UpperCAmelCase__ : Union[str, Any] = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]
        UpperCAmelCase__ : str = [
            """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]
        UpperCAmelCase__ : str = tokenizer(A ,return_tensors="""np""" ,truncation=A ,max_length=512 ,padding=A )
        UpperCAmelCase__ : Union[str, Any] = model.generate(**A ,num_beams=2 ).sequences
        UpperCAmelCase__ : int = tokenizer.batch_decode(A ,skip_special_tokens=A )
        assert tgt_text == decoded
65
1
"""Marian tokenizer tests (sentencepiece-backed).

NOTE(review): identifiers are machine-mangled (``__lowercase``,
``UpperCAmelCase__``, parameters named ``A``) and several bodies read names
that are never bound; code tokens preserved verbatim, only docs added.
"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available

if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
__UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'}
__UpperCAmelCase = '>>zh<<'
__UpperCAmelCase = 'Helsinki-NLP/'

# Pick the tensor framework for return_tensors based on availability.
if is_torch_available():
    __UpperCAmelCase = 'pt'
elif is_tf_available():
    __UpperCAmelCase = 'tf'
else:
    __UpperCAmelCase = 'jax'


@require_sentencepiece
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
    snake_case_ = MarianTokenizer
    snake_case_ = False
    snake_case_ = True

    def __lowercase ( self : Optional[int] ):
        '''Write a tiny vocab + tokenizer config and spm models into a temp dir.'''
        super().setUp()
        UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) )
        UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname )
        save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def __lowercase ( self : List[Any] ,**A : List[Any] ):
        '''Load a MarianTokenizer from the temp dir with the given kwargs.'''
        return MarianTokenizer.from_pretrained(self.tmpdirname ,**A )

    def __lowercase ( self : Union[str, Any] ,A : Tuple ):
        '''Return (input_text, output_text) pair used by the common tokenizer tests.'''
        return (
            "This is a test",
            "This is a test",
        )

    def __lowercase ( self : List[Any] ):
        '''``</s>`` maps to id 0 and back.'''
        UpperCAmelCase__ : Optional[Any] = """</s>"""
        UpperCAmelCase__ : int = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )

    def __lowercase ( self : Union[str, Any] ):
        '''Vocab ordering and size: </s>, <unk> first, <pad> last, 9 entries.'''
        UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,"""</s>""" )
        self.assertEqual(vocab_keys[1] ,"""<unk>""" )
        self.assertEqual(vocab_keys[-1] ,"""<pad>""" )
        self.assertEqual(len(A ) ,9 )

    def __lowercase ( self : Dict ):
        '''vocab_size matches the 9-token fixture vocab.'''
        self.assertEqual(self.get_tokenizer().vocab_size ,9 )

    def __lowercase ( self : List[Any] ):
        '''Pretrained en-de tokenizer produces the expected ids and can round-trip save/load.'''
        UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
        UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A )
        self.assertIsInstance(A ,A )
        UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(A ,batch.input_ids[0] )
        UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(A )
        UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )]
        self.assertIn("""source.spm""" ,A )
        MarianTokenizer.from_pretrained(A )

    def __lowercase ( self : Union[str, Any] ):
        '''Truncation caps outputs at the 512-token model max length.'''
        UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
        UpperCAmelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] ,padding=A ,truncation=A ,return_tensors=A )
        self.assertIsInstance(A ,A )
        self.assertEqual(batch.input_ids.shape ,(2, 512) )

    def __lowercase ( self : Optional[Any] ):
        '''Without truncation, padding only reaches the longest sequence in the batch.'''
        UpperCAmelCase__ : int = self.get_tokenizer()
        UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A )
        self.assertIsInstance(A ,A )
        self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) )

    @slow
    def __lowercase ( self : Dict ):
        '''Pinned integration fixture against Helsinki-NLP/opus-mt-en-de at a fixed revision.'''
        # fmt: off
        UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,)

    def __lowercase ( self : List[Any] ):
        '''Separate source/target vocabs: source text and text_target encode differently.'''
        UpperCAmelCase__ : List[str] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        UpperCAmelCase__ : Any = """Tämä on testi"""
        UpperCAmelCase__ : int = """This is a test"""
        UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2]
        UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2]
        UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids
        self.assertListEqual(A ,A )
        UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids
        self.assertListEqual(A ,A )
        UpperCAmelCase__ : int = tokenizer.decode(A ,skip_special_tokens=A )
        self.assertEqual(A ,A )
65
"""Count the set bits of a non-negative integer (Brian Kernighan's algorithm)."""


def lowerCAmelCase ( __UpperCamelCase ):
    """Return the number of 1-bits in the binary representation of a
    non-negative integer.

    Uses Kernighan's trick: ``n &= n - 1`` clears the lowest set bit, so the
    loop runs once per set bit rather than once per bit position.

    Raises:
        ValueError: if the input is not an ``int`` or is negative.

    >>> lowerCAmelCase(25)
    3
    >>> lowerCAmelCase(0)
    0
    >>> lowerCAmelCase(8)
    1
    """
    # Bug fix: the original called isinstance(x, x) — a TypeError, since the
    # second argument must be a type — and then read an unbound name `number`
    # (the parameter is `__UpperCamelCase`), so every call raised NameError.
    if not isinstance(__UpperCamelCase , int ) or __UpperCamelCase < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    number = __UpperCamelCase
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
65
1
"""PyTorch UperNet for semantic segmentation (backbone + PSP/FPN decode head + optional FCN auxiliary head).

NOTE(review): identifiers are machine-mangled (``__lowercase``, ``nn.Convad``
presumably ``nn.Conv2d``, ``nn.BatchNormad`` presumably ``nn.BatchNorm2d``,
parameters all named ``A``); code tokens preserved verbatim, only docs added.
"""
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig

__UpperCAmelCase = [
    'openmmlab/upernet-convnext-tiny',
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
__UpperCAmelCase = 'UperNetConfig'


class __lowercase ( nn.Module ):
    # Conv -> BatchNorm -> ReLU building block used throughout the decode heads.
    def __init__( self : int ,A : int ,A : int ,A : Union[int, Tuple[int, int]] ,A : Union[int, Tuple[int, int], str] = 0 ,A : bool = False ,A : Union[int, Tuple[int, int]] = 1 ,):
        '''Build the conv/norm/activation triple (bias disabled by default since BN follows).'''
        super().__init__()
        UpperCAmelCase__ : Any = nn.Convad(
            in_channels=A ,out_channels=A ,kernel_size=A ,padding=A ,bias=A ,dilation=A ,)
        UpperCAmelCase__ : List[Any] = nn.BatchNormad(A )
        UpperCAmelCase__ : Optional[Any] = nn.ReLU()

    def __lowercase ( self : List[Any] ,A : torch.Tensor ):
        '''conv -> batch_norm -> relu.'''
        UpperCAmelCase__ : Any = self.conv(A )
        UpperCAmelCase__ : Optional[Any] = self.batch_norm(A )
        UpperCAmelCase__ : Dict = self.activation(A )
        return output


class __lowercase ( nn.Module ):
    # One pyramid-pooling branch: adaptive average pool to a fixed scale, then 1x1 conv.
    def __init__( self : int ,A : int ,A : int ,A : int ):
        super().__init__()
        UpperCAmelCase__ : int = [
            nn.AdaptiveAvgPoolad(A ),
            UperNetConvModule(A ,A ,kernel_size=1 ),
        ]
        # Register layers as named submodules so parameters are tracked.
        for i, layer in enumerate(self.layers ):
            self.add_module(str(A ) ,A )

    def __lowercase ( self : Dict ,A : torch.Tensor ):
        '''Apply the pool + conv pipeline sequentially.'''
        UpperCAmelCase__ : Optional[int] = input
        for layer in self.layers:
            UpperCAmelCase__ : Optional[int] = layer(A )
        return hidden_state


class __lowercase ( nn.Module ):
    # Pyramid Pooling Module: pools the feature map at several scales and
    # upsamples each branch back to the input's spatial size.
    def __init__( self : Union[str, Any] ,A : Tuple[int, ...] ,A : int ,A : int ,A : bool ):
        super().__init__()
        UpperCAmelCase__ : Dict = pool_scales
        UpperCAmelCase__ : Optional[int] = align_corners
        UpperCAmelCase__ : Optional[int] = in_channels
        UpperCAmelCase__ : List[str] = channels
        UpperCAmelCase__ : Tuple = []
        for i, pool_scale in enumerate(A ):
            UpperCAmelCase__ : str = UperNetPyramidPoolingBlock(pool_scale=A ,in_channels=A ,channels=A )
            self.blocks.append(A )
            self.add_module(str(A ) ,A )

    def __lowercase ( self : List[Any] ,A : torch.Tensor ):
        '''Return a list of per-scale outputs, each bilinearly resized to x's spatial size.'''
        UpperCAmelCase__ : Optional[Any] = []
        for ppm in self.blocks:
            UpperCAmelCase__ : Tuple = ppm(A )
            UpperCAmelCase__ : List[str] = nn.functional.interpolate(
                A ,size=x.size()[2:] ,mode="""bilinear""" ,align_corners=self.align_corners )
            ppm_outs.append(A )
        return ppm_outs


class __lowercase ( nn.Module ):
    # UperNet decode head: PSP module on the deepest feature map plus an FPN
    # over the shallower maps, fused and classified per pixel.
    def __init__( self : Union[str, Any] ,A : str ,A : Optional[Any] ):
        super().__init__()
        UpperCAmelCase__ : str = config
        UpperCAmelCase__ : Union[str, Any] = config.pool_scales  # e.g. (1, 2, 3, 6)
        UpperCAmelCase__ : Tuple = in_channels
        UpperCAmelCase__ : Union[str, Any] = config.hidden_size
        UpperCAmelCase__ : str = False
        UpperCAmelCase__ : Optional[Any] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
        # PSP Module
        UpperCAmelCase__ : int = UperNetPyramidPoolingModule(
            self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,)
        UpperCAmelCase__ : Tuple = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
        # FPN Module
        UpperCAmelCase__ : Tuple = nn.ModuleList()
        UpperCAmelCase__ : Optional[int] = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            UpperCAmelCase__ : Optional[Any] = UperNetConvModule(A ,self.channels ,kernel_size=1 )
            UpperCAmelCase__ : Dict = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 )
            self.lateral_convs.append(A )
            self.fpn_convs.append(A )
        UpperCAmelCase__ : Tuple = UperNetConvModule(
            len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)

    def __lowercase ( self : Dict ):
        '''Initialize all submodule weights via _init_weights.'''
        self.apply(self._init_weights )

    def __lowercase ( self : Dict ,A : List[Any] ):
        '''Normal-init conv weights, zero biases.'''
        if isinstance(A ,nn.Convad ):
            module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def __lowercase ( self : str ,A : Tuple ):
        '''PSP forward on the deepest feature map: concat original + pooled branches, then bottleneck.'''
        UpperCAmelCase__ : Optional[int] = inputs[-1]
        UpperCAmelCase__ : Any = [x]
        psp_outs.extend(self.psp_modules(A ) )
        UpperCAmelCase__ : Union[str, Any] = torch.cat(A ,dim=1 )
        UpperCAmelCase__ : Dict = self.bottleneck(A )
        return output

    def __lowercase ( self : List[Any] ,A : torch.Tensor ):
        '''Full head: laterals + top-down fusion + per-level convs + fused classification.'''
        # build laterals
        UpperCAmelCase__ : Optional[int] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(A ) )
        # build top-down path
        UpperCAmelCase__ : Optional[int] = len(A )
        for i in range(used_backbone_levels - 1 ,0 ,-1 ):
            UpperCAmelCase__ : Optional[Any] = laterals[i - 1].shape[2:]
            UpperCAmelCase__ : List[str] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] ,size=A ,mode="""bilinear""" ,align_corners=self.align_corners )
        # build outputs
        UpperCAmelCase__ : List[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 ,0 ,-1 ):
            UpperCAmelCase__ : List[Any] = nn.functional.interpolate(
                fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode="""bilinear""" ,align_corners=self.align_corners )
        UpperCAmelCase__ : str = torch.cat(A ,dim=1 )
        UpperCAmelCase__ : Dict = self.fpn_bottleneck(A )
        UpperCAmelCase__ : Tuple = self.classifier(A )
        return output


class __lowercase ( nn.Module ):
    # FCN auxiliary head: a stack of 3x3 convs on one backbone feature map,
    # optionally re-concatenated with the input, then a 1x1 classifier.
    def __init__( self : Optional[Any] ,A : Tuple ,A : int = 2 ,A : int = 3 ,A : Union[int, Tuple[int, int]] = 1 ):
        super().__init__()
        UpperCAmelCase__ : Optional[Any] = config
        UpperCAmelCase__ : List[str] = config.auxiliary_in_channels
        UpperCAmelCase__ : int = config.auxiliary_channels
        UpperCAmelCase__ : List[Any] = config.auxiliary_num_convs
        UpperCAmelCase__ : Any = config.auxiliary_concat_input
        UpperCAmelCase__ : Tuple = in_index
        # "same" padding for the given kernel/dilation.
        UpperCAmelCase__ : Optional[int] = (kernel_size // 2) * dilation
        UpperCAmelCase__ : Optional[Any] = []
        convs.append(
            UperNetConvModule(
                self.in_channels ,self.channels ,kernel_size=A ,padding=A ,dilation=A ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels ,self.channels ,kernel_size=A ,padding=A ,dilation=A ) )
        if self.num_convs == 0:
            UpperCAmelCase__ : Tuple = nn.Identity()
        else:
            UpperCAmelCase__ : List[str] = nn.Sequential(*A )
        if self.concat_input:
            UpperCAmelCase__ : str = UperNetConvModule(
                self.in_channels + self.channels ,self.channels ,kernel_size=A ,padding=kernel_size // 2 )
        UpperCAmelCase__ : int = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )

    def __lowercase ( self : Optional[int] ):
        '''Initialize all submodule weights via _init_weights.'''
        self.apply(self._init_weights )

    def __lowercase ( self : Any ,A : Any ):
        '''Normal-init conv weights, zero biases.'''
        if isinstance(A ,nn.Convad ):
            module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def __lowercase ( self : Optional[int] ,A : torch.Tensor ):
        '''Classify from the feature map selected by in_index.'''
        # just take the relevant feature maps
        UpperCAmelCase__ : int = encoder_hidden_states[self.in_index]
        UpperCAmelCase__ : int = self.convs(A )
        if self.concat_input:
            UpperCAmelCase__ : str = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) )
        UpperCAmelCase__ : Tuple = self.classifier(A )
        return output


class __lowercase ( __lowerCamelCase ):
    # PreTrainedModel glue: config class, input name, and weight init hooks.
    snake_case_ = UperNetConfig
    snake_case_ = """pixel_values"""
    snake_case_ = True

    def __lowercase ( self : Dict ,A : str ):
        '''Delegate weight init to the backbone and both heads.'''
        if isinstance(A ,A ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def __lowercase ( self : str ):
        '''Initialize backbone and head weights.'''
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def __lowercase ( self : Union[str, Any] ,A : List[Any] ,A : int=False ):
        '''Gradient-checkpointing toggle hook.'''
        if isinstance(A ,A ):
            UpperCAmelCase__ : str = value


__UpperCAmelCase = r'\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__UpperCAmelCase = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , __lowerCamelCase , )
class __lowercase ( __lowerCamelCase ):
    def __init__( self : Dict ,A : Optional[int] ):
        '''Assemble backbone + decode head (+ optional auxiliary head) from config.'''
        super().__init__(A )
        UpperCAmelCase__ : Any = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        UpperCAmelCase__ : Any = UperNetHead(A ,in_channels=self.backbone.channels )
        UpperCAmelCase__ : Dict = UperNetFCNHead(A ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
    @replace_return_docstrings(output_type=A ,config_class=_CONFIG_FOR_DOC )
    def __lowercase ( self : Optional[int] ,A : Optional[torch.Tensor] = None ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : Optional[torch.Tensor] = None ,A : Optional[bool] = None ,):
        '''Forward pass: backbone features -> upsampled per-pixel logits, with
        optional auxiliary-head loss weighted by config.auxiliary_loss_weight.'''
        UpperCAmelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCAmelCase__ : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCAmelCase__ : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
        UpperCAmelCase__ : int = self.backbone.forward_with_filtered_kwargs(
            A ,output_hidden_states=A ,output_attentions=A )
        UpperCAmelCase__ : List[Any] = outputs.feature_maps
        UpperCAmelCase__ : int = self.decode_head(A )
        # Upsample logits back to the input resolution.
        UpperCAmelCase__ : Optional[Any] = nn.functional.interpolate(A ,size=pixel_values.shape[2:] ,mode="""bilinear""" ,align_corners=A )
        UpperCAmelCase__ : int = None
        if self.auxiliary_head is not None:
            UpperCAmelCase__ : Union[str, Any] = self.auxiliary_head(A )
            UpperCAmelCase__ : Tuple = nn.functional.interpolate(
                A ,size=pixel_values.shape[2:] ,mode="""bilinear""" ,align_corners=A )
        UpperCAmelCase__ : Optional[int] = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("""The number of labels should be greater than one""" )
            else:
                # compute weighted loss
                UpperCAmelCase__ : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                UpperCAmelCase__ : Optional[int] = loss_fct(A ,A )
                UpperCAmelCase__ : Any = loss_fct(A ,A )
                UpperCAmelCase__ : int = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                UpperCAmelCase__ : Optional[int] = (logits,) + outputs[1:]
            else:
                UpperCAmelCase__ : Dict = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=A ,logits=A ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
65
"""Look up book metadata on Open Library by ISBN and print a summary.

NOTE(review): function and variable names are machine-mangled; several bodies
read names (``olid``, ``new_olid``, ``data``) that are never bound — preserved
verbatim, only comments/docstrings added.
"""
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def lowerCAmelCase ( __UpperCamelCase = "isbn/0140328726" ):
    '''Fetch the JSON record for an Open Library id (e.g. ``isbn/...`` or ``authors/...``).

    Raises ValueError when the id does not have exactly one ``/`` component.
    Performs a network request — no caching, no retry.
    '''
    UpperCAmelCase__ : Optional[Any] = olid.strip().strip("""/""" )  # Remove leading/trailing whitespace & slashes
    if new_olid.count("""/""" ) != 1:
        UpperCAmelCase__ : Dict = f"{olid} is not a valid Open Library olid"
        raise ValueError(__UpperCamelCase )
    return requests.get(f"https://openlibrary.org/{new_olid}.json" ).json()


def lowerCAmelCase ( __UpperCamelCase ):
    '''Reduce a raw Open Library book record to a small human-readable dict.

    Resolves author keys to names (one extra request per author) and joins
    list values into comma-separated strings.
    '''
    UpperCAmelCase__ : Any = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    UpperCAmelCase__ : Dict = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    UpperCAmelCase__ : str = [
        get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
    ]
    UpperCAmelCase__ : Dict = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(__UpperCamelCase , __UpperCamelCase ):
            UpperCAmelCase__ : Dict = """, """.join(__UpperCamelCase )
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive loop: prompt for ISBNs until the user quits.
    while True:
        __UpperCAmelCase = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            __UpperCAmelCase = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print('\n'.join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
65
1
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = ArgumentParser( description=( """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=__UpperCamelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=__UpperCamelCase , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=__UpperCamelCase ) return parser.parse_args() def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = parse_args() # Import training_script as a module. UpperCAmelCase__ : Union[str, Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) UpperCAmelCase__ : Any = script_fpath.stem UpperCAmelCase__ : str = importlib.import_module(__UpperCamelCase ) # Patch sys.argv UpperCAmelCase__ : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
65
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase="pt" ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = {"""add_prefix_space""": True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(""" """ ) else {} UpperCAmelCase__ : List[str] = padding_side return tokenizer( [line] , max_length=__UpperCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ): '''simple docstring''' UpperCAmelCase__ : str = input_ids.ne(__UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowercase ( __lowerCamelCase ): def __init__( self : Tuple ,A : List[Any] ,A : Union[str, Any] ,A : Any ,A : Optional[int] ,A : Union[str, Any]="train" ,A : Tuple=None ,A : Union[str, Any]=None ,A : Tuple=None ,A : int="" ,): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[Any] = Path(A ).joinpath(type_path + """.source""" ) UpperCAmelCase__ : List[str] = Path(A ).joinpath(type_path + """.target""" ) UpperCAmelCase__ : Dict = self.get_char_lens(self.src_file ) UpperCAmelCase__ : int = max_source_length UpperCAmelCase__ : List[str] = max_target_length assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}" 
UpperCAmelCase__ : Dict = tokenizer UpperCAmelCase__ : str = prefix if n_obs is not None: UpperCAmelCase__ : int = self.src_lens[:n_obs] UpperCAmelCase__ : Any = src_lang UpperCAmelCase__ : Any = tgt_lang def __len__( self : Optional[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] ,A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = index + 1 # linecache starts at 1 UpperCAmelCase__ : Tuple = self.prefix + linecache.getline(str(self.src_file ) ,A ).rstrip("""\n""" ) UpperCAmelCase__ : Dict = linecache.getline(str(self.tgt_file ) ,A ).rstrip("""\n""" ) assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer ,A ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right UpperCAmelCase__ : str = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,A ) else self.tokenizer ) UpperCAmelCase__ : Tuple = self.tokenizer.generator if isinstance(self.tokenizer ,A ) else self.tokenizer UpperCAmelCase__ : Tuple = encode_line(A ,A ,self.max_source_length ,"""right""" ) UpperCAmelCase__ : Dict = encode_line(A ,A ,self.max_target_length ,"""right""" ) UpperCAmelCase__ : Optional[Any] = source_inputs["""input_ids"""].squeeze() UpperCAmelCase__ : List[str] = target_inputs["""input_ids"""].squeeze() UpperCAmelCase__ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def __lowercase ( A : int ): '''simple docstring''' return [len(A ) for x in Path(A ).open().readlines()] def __lowercase ( self : List[Any] ,A : Any ): '''simple docstring''' UpperCAmelCase__ : int = torch.stack([x["""input_ids"""] for x in batch] ) UpperCAmelCase__ : Union[str, Any] = torch.stack([x["""attention_mask"""] for x in 
batch] ) UpperCAmelCase__ : Any = torch.stack([x["""decoder_input_ids"""] for x in batch] ) UpperCAmelCase__ : List[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,A ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ : Any = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,A ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ : str = trim_batch(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = trim_batch(A ,A ,attention_mask=A ) UpperCAmelCase__ : List[str] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return list(itertools.chain.from_iterable(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Dict = get_git_info() save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , """git_log.json""" ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=4 , **__UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase ) as f: return json.load(__UpperCamelCase ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = git.Repo(search_parent_directories=__UpperCamelCase ) UpperCAmelCase__ : List[str] = { """repo_id""": str(__UpperCamelCase ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return list(map(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' with 
open(__UpperCamelCase , """wb""" ) as f: return pickle.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' def remove_articles(__UpperCamelCase ): return re.sub(r"""\b(a|an|the)\b""" , """ """ , __UpperCamelCase ) def white_space_fix(__UpperCamelCase ): return " ".join(text.split() ) def remove_punc(__UpperCamelCase ): UpperCAmelCase__ : List[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__UpperCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = normalize_answer(__UpperCamelCase ).split() UpperCAmelCase__ : Dict = normalize_answer(__UpperCamelCase ).split() UpperCAmelCase__ : int = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase ) UpperCAmelCase__ : List[str] = sum(common.values() ) if num_same == 0: return 0 UpperCAmelCase__ : str = 1.0 * num_same / len(__UpperCamelCase ) UpperCAmelCase__ : Optional[int] = 1.0 * num_same / len(__UpperCamelCase ) UpperCAmelCase__ : Tuple = (2 * precision * recall) / (precision + recall) return fa def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' assert len(__UpperCamelCase ) == len(__UpperCamelCase ) UpperCAmelCase__ : Union[str, Any] = 0 for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ): em += exact_match_score(__UpperCamelCase , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: em /= len(__UpperCamelCase ) return {"em": em} def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' 
UpperCAmelCase__ : Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead UpperCAmelCase__ : str = """dropout_rate""" for p in extra_params: if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(__UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) continue UpperCAmelCase__ : Tuple = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p] setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) return hparams, config
65
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = { 'configuration_mobilebert': [ 'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileBertConfig', 'MobileBertOnnxConfig', ], 'tokenization_mobilebert': ['MobileBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['MobileBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileBertForMaskedLM', 'MobileBertForMultipleChoice', 'MobileBertForNextSentencePrediction', 'MobileBertForPreTraining', 'MobileBertForQuestionAnswering', 'MobileBertForSequenceClassification', 'MobileBertForTokenClassification', 'MobileBertLayer', 'MobileBertModel', 'MobileBertPreTrainedModel', 'load_tf_weights_in_mobilebert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileBertForMaskedLM', 'TFMobileBertForMultipleChoice', 'TFMobileBertForNextSentencePrediction', 'TFMobileBertForPreTraining', 'TFMobileBertForQuestionAnswering', 'TFMobileBertForSequenceClassification', 'TFMobileBertForTokenClassification', 'TFMobileBertMainLayer', 'TFMobileBertModel', 'TFMobileBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not 
is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
65
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaControlnetPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def __lowercase ( self : int ): '''simple docstring''' return 32 @property def __lowercase ( self : Dict ): '''simple docstring''' return self.time_input_dim @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __lowercase ( self : Any ): '''simple docstring''' return 100 @property def __lowercase ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Tuple = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": 
(self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase__ : int = UNetaDConditionModel(**A ) return model @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowercase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs ) return model def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.dummy_unet UpperCAmelCase__ : List[Any] = self.dummy_movq UpperCAmelCase__ : List[Any] = DDIMScheduler( num_train_timesteps=1_000 ,beta_schedule="""linear""" ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=A ,set_alpha_to_one=A ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=A ,) UpperCAmelCase__ : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __lowercase ( self : str ,A : Optional[Any] ,A : Any=0 ): '''simple docstring''' UpperCAmelCase__ : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A ) ).to(A ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( A ) # create hint UpperCAmelCase__ : int = floats_tensor((1, 3, 
64, 64) ,rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase__ : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase__ : Dict = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase__ : Dict = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = """cpu""" UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**A ) UpperCAmelCase__ : Optional[int] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(A ) ) UpperCAmelCase__ : Tuple = output.images UpperCAmelCase__ : Dict = pipe( **self.get_dummy_inputs(A ) ,return_dict=A ,)[0] UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Optional[int] = np.array( [0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def __lowercase ( self : Union[str, Any] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = load_numpy( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) UpperCAmelCase__ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) UpperCAmelCase__ : int = torch.from_numpy(np.array(A ) ).float() / 2_5_5.0 UpperCAmelCase__ : Union[str, Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) UpperCAmelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(A ) UpperCAmelCase__ : List[Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) UpperCAmelCase__ : int = pipeline.to(A ) pipeline.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[Any] = """A robot, 4k photo""" UpperCAmelCase__ : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = pipe_prior( A ,generator=A ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() UpperCAmelCase__ : List[str] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ : int = pipeline( image_embeds=A ,negative_image_embeds=A ,hint=A ,generator=A ,num_inference_steps=100 ,output_type="""np""" ,) UpperCAmelCase__ : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A ,A )
65
1
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class __lowercase ( __lowerCamelCase ): snake_case_ = """Wav2Vec2FeatureExtractor""" snake_case_ = """AutoTokenizer""" def __init__( self : List[Any] ,A : Dict ,A : Optional[int] ): '''simple docstring''' super().__init__(A ,A ) UpperCAmelCase__ : Optional[int] = self.feature_extractor UpperCAmelCase__ : int = False @classmethod def __lowercase ( cls : Any ,A : Optional[Any] ,**A : int ): '''simple docstring''' try: return super().from_pretrained(A ,**A ) except OSError: warnings.warn( f"Loading a tokenizer inside {cls.__name__} from a config that does not" """ include a `tokenizer_class` attribute is deprecated and will be """ """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`""" """ attribute to either your `config.json` or `tokenizer_config.json` """ """file to suppress this warning: """ ,A ,) UpperCAmelCase__ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(A ,**A ) UpperCAmelCase__ : Dict = WavaVecaCTCTokenizer.from_pretrained(A ,**A ) return cls(feature_extractor=A ,tokenizer=A ) def __call__( self : str ,*A : Optional[Any] ,**A : str ): '''simple docstring''' # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A ,**A ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. 
Use `audio` instead.""" ) UpperCAmelCase__ : int = kwargs.pop("""raw_speech""" ) else: UpperCAmelCase__ : List[str] = kwargs.pop("""audio""" ,A ) UpperCAmelCase__ : Any = kwargs.pop("""sampling_rate""" ,A ) UpperCAmelCase__ : Tuple = kwargs.pop("""text""" ,A ) if len(A ) > 0: UpperCAmelCase__ : List[str] = args[0] UpperCAmelCase__ : List[str] = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: UpperCAmelCase__ : List[str] = self.feature_extractor(A ,*A ,sampling_rate=A ,**A ) if text is not None: UpperCAmelCase__ : str = self.tokenizer(A ,**A ) if text is None: return inputs elif audio is None: return encodings else: UpperCAmelCase__ : str = encodings["""input_ids"""] return inputs def __lowercase ( self : Dict ,*A : str ,**A : Union[str, Any] ): '''simple docstring''' # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*A ,**A ) UpperCAmelCase__ : Dict = kwargs.pop("""input_features""" ,A ) UpperCAmelCase__ : int = kwargs.pop("""labels""" ,A ) if len(A ) > 0: UpperCAmelCase__ : int = args[0] UpperCAmelCase__ : List[str] = args[1:] if input_features is not None: UpperCAmelCase__ : List[str] = self.feature_extractor.pad(A ,*A ,**A ) if labels is not None: UpperCAmelCase__ : Optional[int] = self.tokenizer.pad(A ,**A ) if labels is None: return input_features elif input_features is None: return labels else: UpperCAmelCase__ : List[str] = labels["""input_ids"""] return input_features def __lowercase ( self : int ,*A : Dict ,**A : List[Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*A ,**A ) def __lowercase ( self : Optional[int] ,*A : Optional[Any] ,**A : Dict ): '''simple docstring''' return self.tokenizer.decode(*A ,**A ) @contextmanager def __lowercase ( self : List[Any] ): '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) UpperCAmelCase__ : int = True UpperCAmelCase__ : Optional[int] = self.tokenizer yield UpperCAmelCase__ : Union[str, Any] = self.feature_extractor UpperCAmelCase__ : List[str] = False
65
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = """vision-encoder-decoder""" snake_case_ = True def __init__( self : List[Any] ,**A : Union[str, Any] ): '''simple docstring''' super().__init__(**A ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"A configuraton of type {self.model_type} cannot be instantiated because " f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" ) UpperCAmelCase__ : int = kwargs.pop("""encoder""" ) UpperCAmelCase__ : int = encoder_config.pop("""model_type""" ) UpperCAmelCase__ : str = kwargs.pop("""decoder""" ) UpperCAmelCase__ : Dict = decoder_config.pop("""model_type""" ) UpperCAmelCase__ : List[Any] = AutoConfig.for_model(A ,**A ) UpperCAmelCase__ : Any = AutoConfig.for_model(A ,**A ) UpperCAmelCase__ : Union[str, Any] = True @classmethod def __lowercase ( cls : List[Any] ,A : PretrainedConfig ,A : PretrainedConfig ,**A : Tuple ): '''simple docstring''' logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) UpperCAmelCase__ : Union[str, Any] = True UpperCAmelCase__ : List[Any] = True return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**A ) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Dict = self.encoder.to_dict() UpperCAmelCase__ : Any = self.decoder.to_dict() UpperCAmelCase__ : Dict = self.__class__.model_type return output class __lowercase ( __lowerCamelCase ): snake_case_ = 
version.parse("""1.11""" ) @property def __lowercase ( self : Optional[int] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowercase ( self : List[Any] ): '''simple docstring''' return 1e-4 @property def __lowercase ( self : List[Any] ): '''simple docstring''' return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class __lowercase ( __lowerCamelCase ): @property def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : int = OrderedDict() UpperCAmelCase__ : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""} UpperCAmelCase__ : Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} UpperCAmelCase__ : List[str] = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def __lowercase ( self : Dict ,A : "PreTrainedTokenizerBase" ,A : int = -1 ,A : int = -1 ,A : bool = False ,A : Optional["TensorType"] = None ,): '''simple docstring''' import torch UpperCAmelCase__ : int = OrderedDict() UpperCAmelCase__ : List[Any] = super().generate_dummy_inputs( A ,batch_size=A ,seq_length=A ,is_pair=A ,framework=A ) UpperCAmelCase__ , UpperCAmelCase__ : int = dummy_input["""input_ids"""].shape UpperCAmelCase__ : int = (batch, encoder_sequence, self._config.encoder_hidden_size) UpperCAmelCase__ : Tuple = dummy_input.pop("""input_ids""" ) UpperCAmelCase__ : Optional[int] = dummy_input.pop("""attention_mask""" ) UpperCAmelCase__ : Dict = torch.zeros(A ) return common_inputs class __lowercase ( __lowerCamelCase ): @property def __lowercase ( self : str ): '''simple docstring''' pass def __lowercase ( self : Any ,A : PretrainedConfig ): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(A ) def __lowercase ( self : Dict ,A : PretrainedConfig ,A : PretrainedConfig ,A : str = "default" ): '''simple docstring''' UpperCAmelCase__ : List[str] = 
encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(A ,A )
65
1
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() __UpperCAmelCase = { 'bart': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'bert': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-base-cased-finetuned-mrpc': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'dpr': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'gpt2': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlnet': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, 
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm-roberta': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'transfo-xl': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'openai-gpt': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'roberta': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'layoutlm': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'roberta-large-mnli': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'camembert': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'flaubert': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert-base-distilled-squad': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert-visual-feature-encoder': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'ctrl': ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'albert': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 't5': ( TaConfig, TFTaForConditionalGeneration, 
TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'electra': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'wav2vec2': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=True ): '''simple docstring''' if model_type not in MODEL_CLASSES: raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: UpperCAmelCase__ : str = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models ) UpperCAmelCase__ : Tuple = config_class.from_json_file(__UpperCamelCase ) UpperCAmelCase__ : Union[str, Any] = True UpperCAmelCase__ : List[str] = True print(F"Building TensorFlow model from configuration: {config}" ) UpperCAmelCase__ : int = model_class(__UpperCamelCase ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): UpperCAmelCase__ : Union[str, Any] = cached_file( __UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: UpperCAmelCase__ : int = load_pytorch_checkpoint_in_tfa_model(__UpperCamelCase , __UpperCamelCase ) if compare_with_pt_model: UpperCAmelCase__ : List[Any] = tf_model(tf_model.dummy_inputs , training=__UpperCamelCase ) # build the network UpperCAmelCase__ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" ) UpperCAmelCase__ : Any = pt_model_class.from_pretrained( pretrained_model_name_or_path=__UpperCamelCase , config=__UpperCamelCase , state_dict=__UpperCamelCase ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = pt_model(**pt_model.dummy_inputs ) UpperCAmelCase__ : int = 
pto[0].numpy() UpperCAmelCase__ : int = tfo[0].numpy() UpperCAmelCase__ : Optional[Any] = np.amax(np.abs(np_pt - np_tf ) ) print(F"Max absolute difference between models outputs {diff}" ) assert diff <= 2e-2, F"Error, model absolute difference is >2e-2: {diff}" # Save pytorch-model print(F"Save TensorFlow model to {tf_dump_path}" ) tf_model.save_weights(__UpperCamelCase , save_format="""h5""" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , ): '''simple docstring''' if args_model_type is None: UpperCAmelCase__ : str = list(MODEL_CLASSES.keys() ) else: UpperCAmelCase__ : int = [args_model_type] for j, model_type in enumerate(__UpperCamelCase , start=1 ): print("""=""" * 100 ) print(F" Converting model type {j}/{len(__UpperCamelCase )}: {model_type}" ) print("""=""" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." 
) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: UpperCAmelCase__ : str = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: UpperCAmelCase__ : List[str] = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(__UpperCamelCase , __UpperCamelCase ) , start=1 ): print("""-""" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F" Skipping finetuned checkpoint {model_shortcut_name}" ) continue UpperCAmelCase__ : Dict = model_shortcut_name elif only_convert_finetuned_models: print(F" Skipping not finetuned checkpoint {model_shortcut_name}" ) continue print( F" Converting checkpoint {i}/{len(__UpperCamelCase )}: {model_shortcut_name} - model_type {model_type}" ) print("""-""" * 100 ) if config_shortcut_name in aws_config_map: UpperCAmelCase__ : int = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models ) else: UpperCAmelCase__ : Union[str, Any] = config_shortcut_name if model_shortcut_name in aws_model_maps: UpperCAmelCase__ : Optional[Any] = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models ) else: UpperCAmelCase__ : Any = model_shortcut_name if os.path.isfile(__UpperCamelCase ): UpperCAmelCase__ : int = """converted_model""" convert_pt_checkpoint_to_tf( model_type=__UpperCamelCase , pytorch_checkpoint_path=__UpperCamelCase , config_file=__UpperCamelCase , tf_dump_path=os.path.join(__UpperCamelCase , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__UpperCamelCase , ) if remove_cached_files: os.remove(__UpperCamelCase ) os.remove(__UpperCamelCase ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_dump_path', 
default=None, type=str, required=True, help='Path to the output Tensorflow dump file.' ) parser.add_argument( '--model_type', default=None, type=str, help=( F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and " 'convert all the models from AWS.' ), ) parser.add_argument( '--pytorch_checkpoint_path', default=None, type=str, help=( 'Path to the PyTorch checkpoint path or shortcut name to download from AWS. ' 'If not given, will download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--config_file', default=None, type=str, help=( 'The config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture. If not given and ' '--pytorch_checkpoint_path is not given or is a shortcut name ' 'use the configuration associated to the shortcut name on the AWS' ), ) parser.add_argument( '--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.' ) parser.add_argument( '--use_cached_models', action='store_true', help='Use cached models if possible instead of updating to latest checkpoint versions.', ) parser.add_argument( '--remove_cached_files', action='store_true', help='Remove pytorch models after conversion (save memory when converting in batches).', ) parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.') __UpperCAmelCase = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if 
args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
65
"""simple docstring""" import requests def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Tuple = {"""Content-Type""": """application/json"""} UpperCAmelCase__ : Optional[Any] = requests.post(__UpperCamelCase , json={"""text""": message_body} , headers=__UpperCamelCase ) if response.status_code != 200: UpperCAmelCase__ : Any = ( """Request to slack returned an error """ F"{response.status_code}, the response is:\n{response.text}" ) raise ValueError(__UpperCamelCase ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
65
1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=__lowerCamelCase ) class __lowercase ( __lowerCamelCase ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization snake_case_ = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) snake_case_ = Features({"""text""": Value("""string""" )} ) snake_case_ = Features({"""labels""": ClassLabel} ) snake_case_ = "text" snake_case_ = "labels" def __lowercase ( self : Dict ,A : int ): '''simple docstring''' if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features." ) if not isinstance(features[self.label_column] ,A ): raise ValueError(f"Column {self.label_column} is not a ClassLabel." ) UpperCAmelCase__ : Dict = copy.deepcopy(self ) UpperCAmelCase__ : Optional[int] = self.label_schema.copy() UpperCAmelCase__ : Dict = features[self.label_column] UpperCAmelCase__ : Optional[int] = label_schema return task_template @property def __lowercase ( self : Optional[int] ): '''simple docstring''' return { self.text_column: "text", self.label_column: "labels", }
65
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = CTRLTokenizer snake_case_ = False snake_case_ = False def __lowercase ( self : List[str] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : List[Any] = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""] UpperCAmelCase__ : Optional[int] = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""] UpperCAmelCase__ : int = {"""unk_token""": """<unk>"""} UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def __lowercase ( self : int ,**A : Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : List[Any] ,A : Any ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = """adapt react readapt apt""" UpperCAmelCase__ : Any = """adapt react readapt apt""" return input_text, output_text def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) UpperCAmelCase__ : Tuple = """adapt react readapt apt""" UpperCAmelCase__ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split() UpperCAmelCase__ : Dict = 
tokenizer.tokenize(A ) self.assertListEqual(A ,A ) UpperCAmelCase__ : Any = tokens + [tokenizer.unk_token] UpperCAmelCase__ : Dict = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,A )
65
1
"""simple docstring""" from __future__ import annotations def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if len(__UpperCamelCase ) <= 1 or n <= 1: return insert_next(__UpperCamelCase , n - 1 ) rec_insertion_sort(__UpperCamelCase , n - 1 ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if index >= len(__UpperCamelCase ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order UpperCAmelCase__ , UpperCAmelCase__ : Any = ( collection[index], collection[index - 1], ) insert_next(__UpperCamelCase , index + 1 ) if __name__ == "__main__": __UpperCAmelCase = input('Enter integers separated by spaces: ') __UpperCAmelCase = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
65
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { 'configuration_bridgetower': [ 'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig', ], 'processing_bridgetower': ['BridgeTowerProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST', 'BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
65
1
"""simple docstring""" from sklearn.metrics import fa_score import datasets __UpperCAmelCase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' __UpperCAmelCase = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' __UpperCAmelCase = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def __lowercase ( self : List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,) def __lowercase ( self : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Optional[Any]=None ,A : List[str]=1 ,A : Optional[Any]="binary" ,A : Any=None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = fa_score( A ,A ,labels=A ,pos_label=A ,average=A ,sample_weight=A ) return {"f1": float(A ) if score.size == 1 else score}
65
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """is_longer"""] def __init__( self : str ,A : Union[str, Any]=64 ,A : Tuple=48_000 ,A : Dict=480 ,A : List[str]=10 ,A : str=1_024 ,A : Any=0.0 ,A : Optional[int]=False ,A : float = 0 ,A : float = 14_000 ,A : int = None ,A : str = "fusion" ,A : str = "repeatpad" ,**A : List[Any] ,): '''simple docstring''' super().__init__( feature_size=A ,sampling_rate=A ,padding_value=A ,return_attention_mask=A ,**A ,) UpperCAmelCase__ : List[Any] = top_db UpperCAmelCase__ : Union[str, Any] = truncation UpperCAmelCase__ : Optional[int] = padding UpperCAmelCase__ : List[Any] = fft_window_size UpperCAmelCase__ : Optional[Any] = (fft_window_size >> 1) + 1 UpperCAmelCase__ : Any = hop_length UpperCAmelCase__ : List[str] = max_length_s UpperCAmelCase__ : List[Any] = max_length_s * sampling_rate UpperCAmelCase__ : List[Any] = sampling_rate UpperCAmelCase__ : Optional[int] = frequency_min UpperCAmelCase__ : Tuple = frequency_max UpperCAmelCase__ : List[str] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm=A ,mel_scale="""htk""" ,) UpperCAmelCase__ : str = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm="""slaney""" ,mel_scale="""slaney""" ,) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Tuple = self.__class__.__name__ if "mel_filters" in 
output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __lowercase ( self : List[str] ,A : np.array ,A : Optional[np.array] = None ): '''simple docstring''' UpperCAmelCase__ : Dict = spectrogram( A ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=A ,log_mel="""dB""" ,) return log_mel_spectrogram.T def __lowercase ( self : Optional[Any] ,A : Union[str, Any] ,A : int ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : List[str] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : int = [0] # randomly choose index for each part UpperCAmelCase__ : Tuple = np.random.choice(ranges[0] ) UpperCAmelCase__ : Tuple = np.random.choice(ranges[1] ) UpperCAmelCase__ : str = np.random.choice(ranges[2] ) UpperCAmelCase__ : List[str] = mel[idx_front : idx_front + chunk_frames, :] UpperCAmelCase__ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] UpperCAmelCase__ : Dict = mel[idx_back : idx_back + chunk_frames, :] UpperCAmelCase__ : Optional[Any] = torch.tensor(mel[None, None, :] ) UpperCAmelCase__ : int = torch.nn.functional.interpolate( A ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=A ) UpperCAmelCase__ : Dict = mel_shrink[0][0].numpy() UpperCAmelCase__ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def __lowercase ( self : Any ,A : np.array ,A : Optional[int] ,A : Any ,A : Tuple ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": UpperCAmelCase__ : int = True # random crop to max_length (for compatibility) -> this should be handled by self.pad 
UpperCAmelCase__ : str = len(A ) - max_length UpperCAmelCase__ : Optional[Any] = np.random.randint(0 ,overflow + 1 ) UpperCAmelCase__ : Optional[int] = waveform[idx : idx + max_length] UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": UpperCAmelCase__ : Tuple = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCAmelCase__ : int = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCAmelCase__ : List[Any] = np.stack([mel, mel, mel, mel] ,axis=0 ) UpperCAmelCase__ : Any = False else: UpperCAmelCase__ : Union[str, Any] = self._random_mel_fusion(A ,A ,A ) UpperCAmelCase__ : List[str] = True else: raise NotImplementedError(f"data_truncating {truncation} not implemented" ) else: UpperCAmelCase__ : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCAmelCase__ : str = int(max_length / len(A ) ) UpperCAmelCase__ : int = np.stack(np.tile(A ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": UpperCAmelCase__ : List[Any] = int(max_length / len(A ) ) UpperCAmelCase__ : str = np.stack(np.tile(A ,A ) ) UpperCAmelCase__ : Optional[Any] = np.pad(A ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": UpperCAmelCase__ : int = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : str = None ,A : Optional[str] = None ,A : Optional[int] = None ,A : Optional[int] = None ,A : Optional[Union[str, TensorType]] = None ,**A : List[str] ,): '''simple docstring''' UpperCAmelCase__ : Optional[int] = truncation if truncation is not None else self.truncation UpperCAmelCase__ : Dict = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[int] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : List[str] = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Any = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [np.asarray(A )] # convert to mel spectrogram, truncate and pad if needed. UpperCAmelCase__ : Tuple = [ self._get_input_mel(A ,max_length if max_length else self.nb_max_samples ,A ,A ) for waveform in raw_speech ] UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Tuple = [] for mel, longer in padded_inputs: input_mel.append(A ) is_longer.append(A ) if truncation == "fusion" and sum(A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCAmelCase__ : List[str] = np.random.randint(0 ,len(A ) ) UpperCAmelCase__ : int = True if isinstance(input_mel[0] ,A ): UpperCAmelCase__ : Tuple = [np.asarray(A ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool UpperCAmelCase__ : List[str] = [[longer] for longer in is_longer] UpperCAmelCase__ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} UpperCAmelCase__ : str = BatchFeature(A ) if return_tensors is not None: UpperCAmelCase__ : int = input_features.convert_to_tensors(A ) return input_features
65
1
"""Fast tokenization class for ALBERT, backed by HuggingFace *tokenizers*."""

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    # The slow (sentencepiece-based) tokenizer is unavailable without sentencepiece.
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast ALBERT tokenizer wrapping a *tokenizers* `tokenizer.json`.

    Mirrors the slow `AlbertTokenizer` interface; the sentencepiece model file is
    only needed (and only saved) when converting back to a slow tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # A slow tokenizer can only be reconstructed when the sentencepiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding ALBERT special tokens.

        Single sequence: ``[CLS] X [SEP]``; pair: ``[CLS] A [SEP] B [SEP]``.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into `save_directory`.

        Raises ValueError when this fast tokenizer was not built from a
        sentencepiece file and therefore cannot produce a slow-tokenizer vocab.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
65
"""Tests for the Donut image processor."""

import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    """Holds the processor config used by the tests and builds the kwargs dict."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Non-square default so height/width mix-ups are caught by the shape checks.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate `DonutImageProcessor` in tests."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
65
1
"""Processor class for InstructBLIP: a BLIP image processor plus two tokenizers
(the language-model tokenizer and a dedicated Q-Former tokenizer)."""

import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    """Wraps a `BlipImageProcessor` and two tokenizers into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer — it is kept outside of `attributes` and is
        # saved/loaded via the "qformer_tokenizer" subfolder (see below).
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize `text` with both tokenizers and preprocess `images`.

        The Q-Former encoding is stored under the `qformer_`-prefixed keys so it
        does not clash with the main tokenizer's output.
        """
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the main tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the main tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save the processor; the Q-Former tokenizer goes into its own subfolder."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the processor, restoring the Q-Former tokenizer from its subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
65
"""Open-Llama model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """Configuration for an Open-Llama model.

    Stores the architecture hyper-parameters and validates the optional
    `rope_scaling` dict before handing the token-id/tying kwargs to
    `PretrainedConfig`.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE(review): the misspelled kwarg "use_memorry_efficient_attention" is the
        # historical serialized name — keep accepting it for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration.

        Accepts `None`, or a two-field dict with `type` in {"linear", "dynamic"}
        and a float `factor` > 1; raises ValueError otherwise.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
65
1
"""Tests for the zero-shot-classification pipeline."""

import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for _ in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for _ in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        # `entailment_id` must track the model config's label2id mapping; restore
        # the original mapping afterwards so the shared pipeline is unchanged.
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
65
"""simple docstring""" from collections.abc import Callable class __lowercase : def __init__( self : Tuple ,A : Callable | None = None ): '''simple docstring''' # Stores actual heap items. UpperCAmelCase__ : list = [] # Stores indexes of each item for supporting updates and deletion. UpperCAmelCase__ : dict = {} # Stores current size of heap. UpperCAmelCase__ : Any = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. UpperCAmelCase__ : int = key or (lambda A : x) def __lowercase ( self : Union[str, Any] ,A : int ): '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def __lowercase ( self : Tuple ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowercase ( self : Any ,A : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowercase ( self : List[Any] ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. 
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.arr[j], self.arr[i] def __lowercase ( self : Optional[int] ,A : int ,A : int ): '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def __lowercase ( self : Optional[int] ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._left(A ) UpperCAmelCase__ : Dict = self._right(A ) UpperCAmelCase__ : Optional[int] = i if left is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = left if right is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = right return valid_parent def __lowercase ( self : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._parent(A ) while parent is not None and not self._cmp(A ,A ): self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : int = parent, self._parent(A ) def __lowercase ( self : str ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = self._get_valid_parent(A ) while valid_parent != index: self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = valid_parent, self._get_valid_parent(A ) def __lowercase ( self : Optional[Any] ,A : int ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Tuple = self.pos_map[item] UpperCAmelCase__ : Dict = [item, self.key(A )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : List[Any] ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Any = self.pos_map[item] del self.pos_map[item] UpperCAmelCase__ : Dict = self.arr[self.size - 1] UpperCAmelCase__ : List[Any] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : str ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : Dict = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(A )] ) else: UpperCAmelCase__ : List[str] = [item, self.key(A )] UpperCAmelCase__ : Union[str, Any] = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowercase ( self : str ): '''simple docstring''' return self.arr[0] if self.size else None def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def lowerCAmelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
65
1
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCAmelCase = abspath(join(dirname(dirname(dirname(__file__))), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main UpperCAmelCase__ : str = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(__UpperCamelCase , id=__UpperCamelCase )
65
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """attention_mask"""] def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,): '''simple docstring''' super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A ) UpperCAmelCase__ : str = feature_size UpperCAmelCase__ : int = sampling_rate UpperCAmelCase__ : int = padding_value UpperCAmelCase__ : Dict = hop_length UpperCAmelCase__ : int = win_length UpperCAmelCase__ : Dict = frame_signal_scale UpperCAmelCase__ : Dict = preemphasis_coeff UpperCAmelCase__ : str = mel_floor UpperCAmelCase__ : Any = normalize_means UpperCAmelCase__ : str = normalize_vars UpperCAmelCase__ : int = win_function UpperCAmelCase__ : List[Any] = return_attention_mask UpperCAmelCase__ : str = win_length * sampling_rate // 1_000 UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000 UpperCAmelCase__ : int = optimal_fft_length(self.sample_size ) UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1 def __lowercase ( self : Union[str, Any] ,A : np.array ): '''simple docstring''' if self.win_function == "hamming_window": UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A ) else: UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ) UpperCAmelCase__ : Union[str, Any] = 
mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,) UpperCAmelCase__ : Optional[Any] = spectrogram( one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,) return msfc_features.T def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ): '''simple docstring''' # make sure we normalize float32 arrays if self.normalize_means: UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 ) UpperCAmelCase__ : Any = np.subtract(A ,A ) if self.normalize_vars: UpperCAmelCase__ : str = x[:input_length].std(axis=0 ) UpperCAmelCase__ : Optional[int] = np.divide(A ,A ) if input_length < x.shape[0]: UpperCAmelCase__ : int = padding_value # make sure array is in float32 UpperCAmelCase__ : str = x.astype(np.floataa ) return x def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ): '''simple docstring''' UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )] def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : Any = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [raw_speech] # extract fbank features UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech] # convert into correct format for padding UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} ) UpperCAmelCase__ : Optional[Any] = self.pad( A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,) # make sure list is in array format UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] ,A ): UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features] UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: 
UpperCAmelCase__ : Union[str, Any] = ( np.array(A ,dtype=np.intaa ) if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) UpperCAmelCase__ : Any = self.normalize( padded_inputs["""input_features"""] ,attention_mask=A ) if return_tensors is not None: UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A ) return padded_inputs
65
1
"""simple docstring""" from argparse import ArgumentParser from .env import EnvironmentCommand def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" ) UpperCAmelCase__ : List[Any] = parser.add_subparsers(help="""diffusers-cli command helpers""" ) # Register commands EnvironmentCommand.register_subcommand(__UpperCamelCase ) # Let's go UpperCAmelCase__ : int = parser.parse_args() if not hasattr(__UpperCamelCase , """func""" ): parser.print_help() exit(1 ) # Run UpperCAmelCase__ : Union[str, Any] = args.func(__UpperCamelCase ) service.run() if __name__ == "__main__": main()
65
"""simple docstring""" from math import factorial def lowerCAmelCase ( __UpperCamelCase = 100 ): '''simple docstring''' return sum(int(__UpperCamelCase ) for x in str(factorial(__UpperCamelCase ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
65
1
"""simple docstring""" # using dfs for finding eulerian path traversal def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = True, True UpperCAmelCase__ : Any = dfs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return path def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : str = 0 UpperCAmelCase__ : Union[str, Any] = -1 for i in range(__UpperCamelCase ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 UpperCAmelCase__ : str = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] UpperCAmelCase__ , UpperCAmelCase__ : int = check_circuit_or_path(__UpperCamelCase , __UpperCamelCase ) if check == 3: print("""graph is not Eulerian""" ) print("""no path""" ) return UpperCAmelCase__ : Dict = 1 if check == 2: UpperCAmelCase__ : List[str] = odd_node print("""graph has a Euler path""" ) if check == 1: print("""graph has a Euler cycle""" ) UpperCAmelCase__ : str = dfs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) print(__UpperCamelCase ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : str = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} UpperCAmelCase__ : List[str] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} UpperCAmelCase__ : Union[str, Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} UpperCAmelCase__ : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]} UpperCAmelCase__ : List[str] = { 1: [], 2: [] # all degree is zero } 
UpperCAmelCase__ : Dict = 10 check_euler(__UpperCamelCase , __UpperCamelCase ) check_euler(__UpperCamelCase , __UpperCamelCase ) check_euler(__UpperCamelCase , __UpperCamelCase ) check_euler(__UpperCamelCase , __UpperCamelCase ) check_euler(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": main()
65
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class __lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] ,A : Optional[int] ,A : int=13 ,A : Tuple=7 ,A : Dict=True ,A : Optional[int]=True ,A : Tuple=True ,A : str=True ,A : Any=99 ,A : Tuple=32 ,A : Dict=5 ,A : Optional[int]=4 ,A : Dict=37 ,A : Any="gelu" ,A : Any=0.1 ,A : Optional[int]=0.1 ,A : Union[str, Any]=512 ,A : Any=16 ,A : List[str]=2 ,A : List[Any]=0.0_2 ,A : Optional[int]=4 ,): '''simple docstring''' UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : Any = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : Optional[int] = is_training UpperCAmelCase__ : Optional[Any] = use_attention_mask UpperCAmelCase__ : int = use_token_type_ids UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Any = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Dict = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : Any = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : List[Any] = type_vocab_size UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = num_choices def __lowercase ( self : Optional[Any] ): '''simple docstring''' 
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase__ : List[str] = None if self.use_attention_mask: UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : int = DistilBertConfig( vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=A ,) return config, input_ids, attention_mask def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs UpperCAmelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = FlaxDistilBertModelTester(self ) @slow def __lowercase ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase__ : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(A ) @require_flax class __lowercase ( unittest.TestCase ): @slow def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 
FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase__ : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) UpperCAmelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCAmelCase__ : Dict = model(A ,attention_mask=A )[0] UpperCAmelCase__ : List[Any] = (1, 11, 768) self.assertEqual(output.shape ,A ) UpperCAmelCase__ : Any = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,A ,atol=1e-4 ) )
65
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaControlnetPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def __lowercase ( self : int ): '''simple docstring''' return 32 @property def __lowercase ( self : Dict ): '''simple docstring''' return self.time_input_dim @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __lowercase ( self : Any ): '''simple docstring''' return 100 @property def __lowercase ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Tuple = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": 
(self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase__ : int = UNetaDConditionModel(**A ) return model @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowercase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs ) return model def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.dummy_unet UpperCAmelCase__ : List[Any] = self.dummy_movq UpperCAmelCase__ : List[Any] = DDIMScheduler( num_train_timesteps=1_000 ,beta_schedule="""linear""" ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=A ,set_alpha_to_one=A ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=A ,) UpperCAmelCase__ : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __lowercase ( self : str ,A : Optional[Any] ,A : Any=0 ): '''simple docstring''' UpperCAmelCase__ : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A ) ).to(A ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( A ) # create hint UpperCAmelCase__ : int = floats_tensor((1, 3, 
64, 64) ,rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase__ : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase__ : Dict = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase__ : Dict = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = """cpu""" UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**A ) UpperCAmelCase__ : Optional[int] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(A ) ) UpperCAmelCase__ : Tuple = output.images UpperCAmelCase__ : Dict = pipe( **self.get_dummy_inputs(A ) ,return_dict=A ,)[0] UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Optional[int] = np.array( [0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def __lowercase ( self : Union[str, Any] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = load_numpy( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) UpperCAmelCase__ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) UpperCAmelCase__ : int = torch.from_numpy(np.array(A ) ).float() / 2_5_5.0 UpperCAmelCase__ : Union[str, Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) UpperCAmelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(A ) UpperCAmelCase__ : List[Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) UpperCAmelCase__ : int = pipeline.to(A ) pipeline.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[Any] = """A robot, 4k photo""" UpperCAmelCase__ : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = pipe_prior( A ,generator=A ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() UpperCAmelCase__ : List[str] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ : int = pipeline( image_embeds=A ,negative_image_embeds=A ,hint=A ,generator=A ,num_inference_steps=100 ,output_type="""np""" ,) UpperCAmelCase__ : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A ,A )
65
"""simple docstring""" __UpperCAmelCase = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image', 'mask_image']) __UpperCAmelCase = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset(['input_tokens']) __UpperCAmelCase = frozenset(['input_tokens'])
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : List[str] = """""" for word_or_phrase in separated: if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise Exception("""join() accepts only strings to be joined""" ) joined += word_or_phrase + separator return joined.strip(__UpperCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
65
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __lowercase ( unittest.TestCase ): def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split() UpperCAmelCase__ : Tuple = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[Any] = { """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>""", } UpperCAmelCase__ : int = { """feature_size""": 1, """padding_value""": 0.0, """sampling_rate""": 16_000, """return_attention_mask""": False, """do_normalize""": True, } UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp() UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,A ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with 
# NOTE(review): whitespace-mangled extraction of a transformers test module for
# Wav2Vec2ProcessorWithLM (a processor bundling a Wav2Vec2 CTC tokenizer, a
# Wav2Vec2 feature extractor, and a pyctcdecode BeamSearchDecoderCTC language-
# model decoder).  Each physical line below packs many statements/methods onto
# one line, so the chunk is not importable until re-flowed; the first statement
# is the tail of a setUp method whose beginning lies outside this chunk.
# Visible (complete) test methods cover: save_pretrained/from_pretrained
# round-trips of tokenizer + feature extractor + decoder, from_pretrained
# decoder kwargs (alpha, beta, score_boundary, unk_score_offset), raising when
# the tokenizer vocab does not match the decoder alphabet, feature-extractor
# and tokenizer pass-through equivalence, decode()/batch_decode() agreement
# with pyctcdecode's decode_beams / decode_beams_batch (including text,
# logit_score and lm_score fields), multiprocessing pool contexts
# (None / "fork" / "spawn"), batch_decode beam-search overrides (beam_width,
# beam_prune_logp, token_min_logp) and decoder-parameter overrides, that only
# decoder-relevant files (alphabet.json, language_model) are downloaded,
# AutoProcessor equivalence, model_input_names, per-word offset outputs for
# decode and batch_decode, and a slow common_voice integration test checking
# word start/end times against golden tensors.
open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) # load decoder from hub UpperCAmelCase__ : int = """hf-internal-testing/ngram-beam-search-decoder""" def __lowercase ( self : str ,**A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy() kwargs.update(A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : List[str] ,**A : Dict ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Any ,**A : List[Any] ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**A ) def __lowercase ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,A ) # decoder self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,) self.assertIsInstance(processor.decoder ,A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() 
,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha ,5.0 ) self.assertEqual(processor.language_model.beta ,3.0 ) self.assertEqual(processor.language_model.score_boundary ,-7.0 ) self.assertEqual(processor.language_model.unk_score_offset ,3 ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["""xx"""] ) with self.assertRaisesRegex(A ,"""include""" ): WavaVecaProcessorWithLM( tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : str = floats_list((3, 1_000) ) UpperCAmelCase__ : Optional[Any] = feature_extractor(A ,return_tensors="""np""" ) UpperCAmelCase__ : List[Any] = processor(A ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[int] = self.get_decoder() UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : List[Any] = """This is a test string""" UpperCAmelCase__ : int = 
processor(text=A ) UpperCAmelCase__ : Dict = tokenizer(A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def __lowercase ( self : Tuple ,A : List[Any]=(2, 10, 16) ,A : Dict=77 ): '''simple docstring''' np.random.seed(A ) return np.random.rand(*A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 ) UpperCAmelCase__ : Tuple = processor.decode(A ) UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(A )[0] self.assertEqual(decoded_decoder[0] ,decoded_processor.text ) self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text ) self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score ) @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] ) def __lowercase ( self : List[str] ,A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: UpperCAmelCase__ : List[str] = processor.batch_decode(A ) else: with get_context(A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(A ,A ) UpperCAmelCase__ : Optional[Any] = list(A ) with get_context("""fork""" ).Pool() as p: UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(A ,decoded_processor.text ) self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text ) self.assertListEqual(A ,decoded_processor.logit_score ) self.assertListEqual(A ,decoded_processor.lm_score ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Any = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : List[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,) UpperCAmelCase__ : List[str] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(A ) with get_context("""fork""" ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,) UpperCAmelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(A ,A ) self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,A ) self.assertTrue(np.array_equal(A 
,decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,A ,atol=1e-3 ) ) self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,A ,atol=1e-3 ) ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : Tuple = 2.0 UpperCAmelCase__ : str = 5.0 UpperCAmelCase__ : Union[str, Any] = -2_0.0 UpperCAmelCase__ : Optional[Any] = True UpperCAmelCase__ : str = processor.batch_decode( A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,) UpperCAmelCase__ : Any = decoded_processor_out.text UpperCAmelCase__ : Union[str, Any] = list(A ) decoder.reset_params( alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,) with get_context("""fork""" ).Pool() as pool: UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch( A ,A ,) UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(A ,A ) self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,A ) UpperCAmelCase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha ,2.0 ) self.assertEqual(lm_model.beta ,5.0 ) self.assertEqual(lm_model.unk_score_offset ,-2_0.0 ) self.assertEqual(lm_model.score_boundary ,A ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : str = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) 
).parent.parent.absolute() UpperCAmelCase__ : Optional[int] = os.listdir(A ) UpperCAmelCase__ : List[Any] = ["""alphabet.json""", """language_model"""] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(A ,A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(A ) UpperCAmelCase__ : Tuple = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() UpperCAmelCase__ : Tuple = os.listdir(A ) UpperCAmelCase__ : Dict = os.listdir(A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(A ,A ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Dict = floats_list((3, 1_000) ) UpperCAmelCase__ : List[str] = processor_wavaveca(A ,return_tensors="""np""" ) UpperCAmelCase__ : Dict = processor_auto(A ,return_tensors="""np""" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 ) UpperCAmelCase__ : List[str] = self._get_dummy_logits() UpperCAmelCase__ : Tuple = processor_wavaveca.batch_decode(A ) UpperCAmelCase__ : List[str] = processor_auto.batch_decode(A ) self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text ) def __lowercase ( self : List[str] ): 
'''simple docstring''' UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) self.assertListEqual( processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,) @staticmethod def __lowercase ( A : Optional[Any] ,A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets] return retrieved_list def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Dict = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(A ,output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : int = self._get_dummy_logits() UpperCAmelCase__ : Any = processor.batch_decode(A ,output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in 
outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) self.assertListEqual( [""" """.join(self.get_from_offsets(A ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] ) @slow @require_torch @require_torchaudio def __lowercase ( self : Tuple ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=A ) UpperCAmelCase__ : Tuple = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : Tuple = iter(A ) UpperCAmelCase__ : Optional[int] = next(A ) UpperCAmelCase__ : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) UpperCAmelCase__ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : Tuple = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(A ).logits.cpu().numpy() UpperCAmelCase__ : Any = processor.decode(logits[0] ,output_word_offsets=A ) UpperCAmelCase__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Union[str, Any] = [ { """start_time""": d["""start_offset"""] * time_offset, """end_time""": d["""end_offset"""] * time_offset, """word""": d["""word"""], } for d in output["""word_offsets"""] ] UpperCAmelCase__ : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON 
THE RIVER AT THE WALL""" # output words self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,A ) self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,output.text ) # output times UpperCAmelCase__ : str = torch.tensor(self.get_from_offsets(A ,"""start_time""" ) ) UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(A ,"""end_time""" ) ) # fmt: off UpperCAmelCase__ : Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) ) self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
65
1
# NOTE(review): whitespace-mangled extraction of the transformers ALBERT
# SentencePiece tokenizer module.  The physical lines below each pack many
# statements, so the chunk is not importable until re-flowed.
# Visible content: pretrained vocab-file URL maps and 512 positional-embedding
# sizes for the eight albert-{base,large,xlarge,xxlarge}-{v1,v2} checkpoints,
# the SPIECE_UNDERLINE marker, and a PreTrainedTokenizer subclass implementing:
# __init__ (loads a SentencePiece model, wraps the mask token as an AddedToken
# so it keeps its leading space), vocab_size / get_vocab, pickling support
# (__getstate__ drops the unpicklable sp_model; __setstate__ re-loads it from
# vocab_file), text preprocessing (optional whitespace collapsing, quote
# normalization, NFKD accent stripping, lowercasing), tokenization with a
# special case splitting trailing "9,9"-style digit+comma pieces,
# token<->id conversion via PieceToId/IdToPiece, detokenization that decodes
# special tokens separately, [CLS]/[SEP] insertion for single and paired
# sequences, get_special_tokens_mask, token-type-id creation, and
# save_vocabulary which copies or serializes spiece.model.
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'spiece.model'} __UpperCAmelCase = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', } } __UpperCAmelCase = { 'albert-base-v1': 512, 'albert-large-v1': 512, 'albert-xlarge-v1': 512, 'albert-xxlarge-v1': 512, 'albert-base-v2': 512, 'albert-large-v2': 512, 'albert-xlarge-v2': 512, 'albert-xxlarge-v2': 512, } __UpperCAmelCase = '▁' class __lowercase ( __lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Union[str, Any] ,A : Dict ,A : Union[str, Any]=True ,A : Optional[Any]=True ,A : Any=False ,A : Optional[Any]="[CLS]" ,A : Union[str, Any]="[SEP]" ,A : Dict="<unk>" ,A : Optional[Any]="[SEP]" ,A : Union[str, Any]="<pad>" ,A : int="[CLS]" ,A : Optional[int]="[MASK]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,): '''simple docstring''' # Mask token behave like a normal word, i.e. 
include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. UpperCAmelCase__ : List[Any] = ( AddedToken(A ,lstrip=A ,rstrip=A ,normalized=A ) if isinstance(A ,A ) else mask_token ) UpperCAmelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,pad_token=A ,cls_token=A ,mask_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) UpperCAmelCase__ : List[Any] = do_lower_case UpperCAmelCase__ : List[Any] = remove_space UpperCAmelCase__ : str = keep_accents UpperCAmelCase__ : List[Any] = vocab_file UpperCAmelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) @property def __lowercase ( self : Optional[int] ): '''simple docstring''' return len(self.sp_model ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = self.__dict__.copy() UpperCAmelCase__ : Any = None return state def __setstate__( self : List[Any] ,A : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): UpperCAmelCase__ : List[str] = {} UpperCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowercase ( self : Any ,A : List[str] ): '''simple docstring''' if self.remove_space: UpperCAmelCase__ : Any = """ """.join(inputs.strip().split() ) else: UpperCAmelCase__ : Union[str, Any] = inputs UpperCAmelCase__ : Optional[int] = outputs.replace("""``""" ,"""\"""" ).replace("""''""" ,"""\"""" ) if not self.keep_accents: UpperCAmelCase__ : Optional[Any] = unicodedata.normalize("""NFKD""" ,A ) 
UpperCAmelCase__ : Optional[Any] = """""".join([c for c in outputs if not unicodedata.combining(A )] ) if self.do_lower_case: UpperCAmelCase__ : int = outputs.lower() return outputs def __lowercase ( self : Union[str, Any] ,A : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.preprocess_text(A ) UpperCAmelCase__ : str = self.sp_model.encode(A ,out_type=A ) UpperCAmelCase__ : List[Any] = [] for piece in pieces: if len(A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): UpperCAmelCase__ : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(A ,"""""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase__ : Union[str, Any] = cur_pieces[1:] else: UpperCAmelCase__ : int = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(A ) else: new_pieces.append(A ) return new_pieces def __lowercase ( self : Union[str, Any] ,A : int ): '''simple docstring''' return self.sp_model.PieceToId(A ) def __lowercase ( self : Optional[int] ,A : str ): '''simple docstring''' return self.sp_model.IdToPiece(A ) def __lowercase ( self : Tuple ,A : str ): '''simple docstring''' UpperCAmelCase__ : int = [] UpperCAmelCase__ : Any = """""" UpperCAmelCase__ : Optional[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token UpperCAmelCase__ : Any = True UpperCAmelCase__ : str = [] else: current_sub_tokens.append(A ) UpperCAmelCase__ : str = False out_string += self.sp_model.decode(A ) return out_string.strip() def __lowercase ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : int = [self.sep_token_id] UpperCAmelCase__ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def 
__lowercase ( self : str ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is not None: return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1] def __lowercase ( self : Tuple ,A : List[int] ,A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [self.sep_token_id] UpperCAmelCase__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowercase ( self : Union[str, Any] ,A : str ,A : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(A ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return UpperCAmelCase__ : List[str] = os.path.join( A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"""wb""" ) as fi: UpperCAmelCase__ : int = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,)
65
# NOTE(review): whitespace-mangled extraction of the `datasets` F1-score metric
# module.  Contains the metric's _DESCRIPTION / _KWARGS_DESCRIPTION / _CITATION
# docstring constants (with worked examples for binary, pos_label, sample
# weights, and the macro/micro/weighted/None averaging modes) and a
# datasets.Metric subclass whose _info declares int32 prediction/reference
# features (wrapped in Sequence for the "multilabel" config) and whose
# _compute delegates to sklearn.metrics.f1_score, returning {"f1": float}
# for a scalar score or the raw per-class array when `average=None`.
# Not runnable as-is: the physical lines below pack many statements each.
"""simple docstring""" from sklearn.metrics import fa_score import datasets __UpperCAmelCase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' __UpperCAmelCase = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' __UpperCAmelCase = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def __lowercase ( self : List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,) def __lowercase ( self : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Optional[Any]=None ,A : List[str]=1 ,A : Optional[Any]="binary" ,A : Any=None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = fa_score( A ,A ,labels=A ,pos_label=A ,average=A ,sample_weight=A ) return {"f1": float(A ) if score.size == 1 else score}
65
1
"""Send a message to a Slack channel through an incoming-webhook URL."""
import requests


def lowerCAmelCase(message_body, slack_url, timeout=60.0):
    """Post ``message_body`` as JSON to the Slack incoming webhook ``slack_url``.

    Fixes in this revision:
      * the original definition declared *both* parameters as
        ``__UpperCamelCase`` — a duplicate argument name, which is a
        SyntaxError in Python — while the body referenced the undefined
        name ``message_body``; the parameters are now named per their use.
      * ``requests.post`` was called without a ``timeout``, so a stalled
        Slack endpoint could hang the caller forever; ``timeout`` is now a
        backward-compatible keyword parameter (seconds).

    Args:
        message_body: text to post (sent as ``{"text": message_body}``).
        slack_url: the incoming-webhook URL to POST to.
        timeout: seconds to wait for the HTTP response (default 60).

    Raises:
        ValueError: if Slack responds with a status code other than 200.
        requests.exceptions.RequestException: on network failure/timeout.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(
        slack_url, json={"text": message_body}, headers=headers, timeout=timeout
    )
    if response.status_code != 200:
        # Surface Slack's diagnostic body so webhook misconfigurations
        # (bad URL, revoked hook, malformed payload) are easy to debug.
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    # (the original guard called the undefined name `send_slack_message`)
    lowerCAmelCase("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
65
# NOTE(review): whitespace-mangled extraction of the transformers
# MarianTokenizer test module.  setUp builds a tiny SentencePiece vocabulary
# from the fixtures/test_sentencepiece.model file and saves vocab +
# tokenizer_config + source/target spm files into a temp dir.  Visible tests
# cover: </s>/<unk>/<pad> id/token conversion and vocab size (9), loading the
# Helsinki-NLP/opus-mt-en-de pretrained tokenizer and checking an exact id
# sequence, that save_pretrained emits source.spm and round-trips, padding /
# truncation of an over-long batch to the 512 max length, shorter-batch
# padding to the longest sequence, a @slow tokenizer_integration_test_util
# run with a large `# fmt: off` expected-encoding fixture (input_ids +
# attention_mask padded with 58_100), and separate source vs target
# (text_target=...) vocabularies on the two-vocab test checkpoint.
# Not runnable as-is: each physical line below packs many methods.
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model') __UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'} __UpperCAmelCase = '>>zh<<' __UpperCAmelCase = 'Helsinki-NLP/' if is_torch_available(): __UpperCAmelCase = 'pt' elif is_tf_available(): __UpperCAmelCase = 'tf' else: __UpperCAmelCase = 'jax' @require_sentencepiece class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = MarianTokenizer snake_case_ = False snake_case_ = True def __lowercase ( self : Optional[int] ): '''simple docstring''' super().setUp() UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase ( self : List[Any] ,**A : List[Any] ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Union[str, Any] ,A : Tuple ): '''simple docstring''' return ( 
"This is a test", "This is a test", ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = """</s>""" UpperCAmelCase__ : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""</s>""" ) self.assertEqual(vocab_keys[1] ,"""<unk>""" ) self.assertEqual(vocab_keys[-1] ,"""<pad>""" ) self.assertEqual(len(A ) ,9 ) def __lowercase ( self : Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,9 ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" ) UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A ) self.assertIsInstance(A ,A ) UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(A ,batch.input_ids[0] ) UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(A ) UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )] self.assertIn("""source.spm""" ,A ) MarianTokenizer.from_pretrained(A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] ,padding=A ,truncation=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch.input_ids.shape ,(2, 512) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) ) @slow def 
__lowercase ( self : Dict ): '''simple docstring''' # fmt: off UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) UpperCAmelCase__ : Any = """Tämä on testi""" UpperCAmelCase__ : int = """This is a test""" UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2] UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2] UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : int = 
tokenizer.decode(A ,skip_special_tokens=A ) self.assertEqual(A ,A )
65
1
"""simple docstring""" from __future__ import annotations import math def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if depth < 0: raise ValueError("""Depth cannot be less than 0""" ) if not scores: raise ValueError("""Scores cannot be empty""" ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , ) ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : str = [90, 23, 6, 33, 21, 65, 123, 34423] UpperCAmelCase__ : Optional[Any] = math.log(len(__UpperCamelCase ) , 2 ) print(F"Optimal value : {minimax(0 , 0 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )}" ) if __name__ == "__main__": import doctest doctest.testmod() main()
65
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowercase ( metaclass=__lowerCamelCase ): snake_case_ = ["""onnx"""] def __init__( self : int ,*A : List[str] ,**A : int ): '''simple docstring''' requires_backends(self ,["""onnx"""] ) @classmethod def __lowercase ( cls : Optional[Any] ,*A : List[str] ,**A : Dict ): '''simple docstring''' requires_backends(cls ,["""onnx"""] ) @classmethod def __lowercase ( cls : List[Any] ,*A : Optional[int] ,**A : int ): '''simple docstring''' requires_backends(cls ,["""onnx"""] )
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Tuple = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowerCAmelCase ( __UpperCamelCase = 5000 ): '''simple docstring''' UpperCAmelCase__ : List[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , __UpperCamelCase )] for i, pentagonal_i in enumerate(__UpperCamelCase ): for j in range(__UpperCamelCase , len(__UpperCamelCase ) ): UpperCAmelCase__ : List[str] = pentagonal_nums[j] UpperCAmelCase__ : List[str] = pentagonal_i + pentagonal_j UpperCAmelCase__ : Union[str, Any] = pentagonal_j - pentagonal_i if is_pentagonal(__UpperCamelCase ) and is_pentagonal(__UpperCamelCase ): return b return -1 if __name__ == "__main__": print(F"{solution() = }")
65
"""simple docstring""" from argparse import ArgumentParser from .env import EnvironmentCommand def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" ) UpperCAmelCase__ : List[Any] = parser.add_subparsers(help="""diffusers-cli command helpers""" ) # Register commands EnvironmentCommand.register_subcommand(__UpperCamelCase ) # Let's go UpperCAmelCase__ : int = parser.parse_args() if not hasattr(__UpperCamelCase , """func""" ): parser.print_help() exit(1 ) # Run UpperCAmelCase__ : Union[str, Any] = args.func(__UpperCamelCase ) service.run() if __name__ == "__main__": main()
65
1
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase="pt" ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = {"""add_prefix_space""": True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(""" """ ) else {} UpperCAmelCase__ : List[Any] = padding_side return tokenizer( [line] , max_length=__UpperCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ): '''simple docstring''' UpperCAmelCase__ : List[str] = input_ids.ne(__UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowercase ( __lowerCamelCase ): def __init__( self : Optional[int] ,A : Optional[int] ,A : Any ,A : Optional[int] ,A : List[Any] ,A : Optional[int]="train" ,A : Any=None ,A : Optional[int]=None ,A : Union[str, Any]=None ,A : Tuple="" ,): '''simple docstring''' super().__init__() UpperCAmelCase__ : Union[str, Any] = Path(A ).joinpath(type_path + """.source""" ) UpperCAmelCase__ : Union[str, Any] = Path(A ).joinpath(type_path + """.target""" ) UpperCAmelCase__ : int = self.get_char_lens(self.src_file ) UpperCAmelCase__ : str = max_source_length UpperCAmelCase__ : Any = max_target_length assert min(self.src_lens ) > 0, f"found empty line in 
{self.src_file}" UpperCAmelCase__ : Optional[int] = tokenizer UpperCAmelCase__ : int = prefix if n_obs is not None: UpperCAmelCase__ : Union[str, Any] = self.src_lens[:n_obs] UpperCAmelCase__ : List[str] = src_lang UpperCAmelCase__ : Optional[int] = tgt_lang def __len__( self : List[str] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : List[Any] ,A : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = index + 1 # linecache starts at 1 UpperCAmelCase__ : List[str] = self.prefix + linecache.getline(str(self.src_file ) ,A ).rstrip("""\n""" ) UpperCAmelCase__ : List[str] = linecache.getline(str(self.tgt_file ) ,A ).rstrip("""\n""" ) assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer ,A ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right UpperCAmelCase__ : int = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,A ) else self.tokenizer ) UpperCAmelCase__ : int = self.tokenizer.generator if isinstance(self.tokenizer ,A ) else self.tokenizer UpperCAmelCase__ : Optional[int] = encode_line(A ,A ,self.max_source_length ,"""right""" ) UpperCAmelCase__ : str = encode_line(A ,A ,self.max_target_length ,"""right""" ) UpperCAmelCase__ : Optional[Any] = source_inputs["""input_ids"""].squeeze() UpperCAmelCase__ : Dict = target_inputs["""input_ids"""].squeeze() UpperCAmelCase__ : int = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def __lowercase ( A : List[str] ): '''simple docstring''' return [len(A ) for x in Path(A ).open().readlines()] def __lowercase ( self : Optional[Any] ,A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = torch.stack([x["""input_ids"""] for x in batch] ) UpperCAmelCase__ : 
Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) UpperCAmelCase__ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] ) UpperCAmelCase__ : Optional[int] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,A ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ : List[str] = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,A ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ : Any = trim_batch(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = trim_batch(A ,A ,attention_mask=A ) UpperCAmelCase__ : Any = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return list(itertools.chain.from_iterable(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : int = get_git_info() save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , """git_log.json""" ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=4 , **__UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase ) as f: return json.load(__UpperCamelCase ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = git.Repo(search_parent_directories=__UpperCamelCase ) UpperCAmelCase__ : Optional[Any] = { """repo_id""": str(__UpperCamelCase ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return list(map(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( 
__UpperCamelCase , __UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase , """wb""" ) as f: return pickle.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' def remove_articles(__UpperCamelCase ): return re.sub(r"""\b(a|an|the)\b""" , """ """ , __UpperCamelCase ) def white_space_fix(__UpperCamelCase ): return " ".join(text.split() ) def remove_punc(__UpperCamelCase ): UpperCAmelCase__ : Dict = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__UpperCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Tuple = normalize_answer(__UpperCamelCase ).split() UpperCAmelCase__ : str = normalize_answer(__UpperCamelCase ).split() UpperCAmelCase__ : str = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase ) UpperCAmelCase__ : int = sum(common.values() ) if num_same == 0: return 0 UpperCAmelCase__ : Union[str, Any] = 1.0 * num_same / len(__UpperCamelCase ) UpperCAmelCase__ : str = 1.0 * num_same / len(__UpperCamelCase ) UpperCAmelCase__ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' assert len(__UpperCamelCase ) == len(__UpperCamelCase ) UpperCAmelCase__ : Any = 0 for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ): em += exact_match_score(__UpperCamelCase , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: em /= len(__UpperCamelCase ) return {"em": em} def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase 
): '''simple docstring''' UpperCAmelCase__ : Any = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead UpperCAmelCase__ : Any = """dropout_rate""" for p in extra_params: if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(__UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) continue UpperCAmelCase__ : Union[str, Any] = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p] setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) return hparams, config
65
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __UpperCAmelCase = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __lowercase : snake_case_ = PegasusConfig snake_case_ = {} snake_case_ = """gelu""" def __init__( self : List[Any] ,A : int ,A : Optional[Any]=13 ,A : Dict=7 ,A : Dict=True ,A : Any=False ,A : Dict=99 ,A : int=32 ,A : Optional[int]=5 ,A : Union[str, Any]=4 ,A : Union[str, Any]=37 ,A : str=0.1 ,A : int=0.1 ,A : Optional[int]=20 ,A : Tuple=2 ,A : str=1 ,A : Optional[Any]=0 ,): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : int = is_training UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : Dict = hidden_size UpperCAmelCase__ : Optional[Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Any = intermediate_size UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = eos_token_id UpperCAmelCase__ : Union[str, Any] = pad_token_id UpperCAmelCase__ : List[str] = bos_token_id def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = 
ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size ) UpperCAmelCase__ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 ) UpperCAmelCase__ : Any = np.concatenate([input_ids, eos_tensor] ,axis=1 ) UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase__ : str = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) UpperCAmelCase__ : Optional[Any] = prepare_pegasus_inputs_dict(A ,A ,A ) return config, inputs_dict def __lowercase ( self : Any ,A : Optional[int] ,A : str ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = 20 UpperCAmelCase__ : Dict = model_class_name(A ) UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A ) UpperCAmelCase__ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" ) UpperCAmelCase__ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,) UpperCAmelCase__ : Optional[int] = model.decode( decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A 
,decoder_position_ids=A ,) UpperCAmelCase__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" ) UpperCAmelCase__ : int = model.decode( decoder_input_ids[:, -1:] ,A ,decoder_attention_mask=A ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=A ,) UpperCAmelCase__ : Dict = model.decode(A ,A ) UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" ) def __lowercase ( self : Optional[int] ,A : str ,A : Dict ,A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = 20 UpperCAmelCase__ : str = model_class_name(A ) UpperCAmelCase__ : Any = model.encode(inputs_dict["""input_ids"""] ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) UpperCAmelCase__ : Optional[int] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] ,axis=-1 ,) UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A ) UpperCAmelCase__ : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,) UpperCAmelCase__ : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,) UpperCAmelCase__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" ) UpperCAmelCase__ : Dict = model.decode( decoder_input_ids[:, -1:] ,A ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=A ,decoder_position_ids=A ,) UpperCAmelCase__ : Union[str, Any] = model.decode(A ,A ,decoder_attention_mask=A ) UpperCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) 
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ): '''simple docstring''' if attention_mask is None: UpperCAmelCase__ : Union[str, Any] = np.not_equal(__UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: UpperCAmelCase__ : Tuple = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () snake_case_ = True snake_case_ = False snake_case_ = False snake_case_ = False def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = FlaxPegasusModelTester(self ) UpperCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=A ) def __lowercase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(A ,A ,A ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(A ,A ,A ) def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : 
Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A ) UpperCAmelCase__ : int = model_class(A ) @jax.jit def encode_jitted(A : Optional[int] ,A : Union[str, Any]=None ,**A : Optional[Any] ): return model.encode(input_ids=A ,attention_mask=A ) with self.subTest("""JIT Enabled""" ): UpperCAmelCase__ : int = encode_jitted(**A ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCAmelCase__ : Dict = encode_jitted(**A ).to_tuple() self.assertEqual(len(A ) ,len(A ) ) for jitted_output, output in zip(A ,A ): self.assertEqual(jitted_output.shape ,output.shape ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : Dict = model_class(A ) UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] ) UpperCAmelCase__ : Dict = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(A : List[Any] ,A : Any ,A : List[Any] ): return model.decode( decoder_input_ids=A ,decoder_attention_mask=A ,encoder_outputs=A ,) with self.subTest("""JIT Enabled""" ): UpperCAmelCase__ : Tuple = decode_jitted(**A ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCAmelCase__ : str = decode_jitted(**A ).to_tuple() self.assertEqual(len(A ) ,len(A ) ) for jitted_output, output in zip(A ,A ): self.assertEqual(jitted_output.shape ,output.shape ) @slow def __lowercase ( self : List[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : List[str] = 
model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=A ) UpperCAmelCase__ : Any = np.ones((1, 1) ) UpperCAmelCase__ : Optional[Any] = model(A ) self.assertIsNotNone(A ) @slow def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) UpperCAmelCase__ : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) UpperCAmelCase__ : Union[str, Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] UpperCAmelCase__ : str = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] UpperCAmelCase__ : str = tokenizer(A ,return_tensors="""np""" ,truncation=A ,max_length=512 ,padding=A ) UpperCAmelCase__ : Union[str, Any] = model.generate(**A ,num_beams=2 ).sequences UpperCAmelCase__ : int = tokenizer.batch_decode(A ,skip_special_tokens=A ) assert tgt_text == decoded
65
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __UpperCAmelCase = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
65
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if not isinstance(__UpperCamelCase , __UpperCamelCase ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) UpperCAmelCase__ : Union[str, Any] = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
65
1
"""simple docstring""" __UpperCAmelCase = { 'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/' } # Exclamation mark is not in ITU-R recommendation # fmt: on __UpperCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()} def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return "".join(REVERSE_DICT[char] for char in message.split() ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = """Morse code here!""" print(__UpperCamelCase ) UpperCAmelCase__ : List[str] = encrypt(__UpperCamelCase ) print(__UpperCamelCase ) UpperCAmelCase__ : int = decrypt(__UpperCamelCase ) print(__UpperCamelCase ) if __name__ == "__main__": main()
65
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def lowerCAmelCase ( __UpperCamelCase = "isbn/0140328726" ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes if new_olid.count("""/""" ) != 1: UpperCAmelCase__ : Dict = F"{olid} is not a valid Open Library olid" raise ValueError(__UpperCamelCase ) return requests.get(F"https://openlibrary.org/{new_olid}.json" ).json() def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Any = { """title""": """Title""", """publish_date""": """Publish date""", """authors""": """Authors""", """number_of_pages""": """Number of pages:""", """first_sentence""": """First sentence""", """isbn_10""": """ISBN (10)""", """isbn_13""": """ISBN (13)""", } UpperCAmelCase__ : Dict = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} UpperCAmelCase__ : str = [ get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""] ] UpperCAmelCase__ : Dict = data["""First sentence"""]["""value"""] for key, value in data.items(): if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCAmelCase__ : Dict = """, """.join(__UpperCamelCase ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: __UpperCAmelCase = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"Sorry, {isbn} is not a valid ISBN. 
Please, input a valid ISBN.") continue print(F"\nSearching Open Library for ISBN: {isbn}...\n") try: __UpperCAmelCase = summarize_book(get_openlibrary_data(F"isbn/{isbn}")) print('\n'.join(F"{key}: {value}" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"Sorry, there are no results for ISBN: {isbn}.")
65
1
"""Feature-extraction pipeline: tokenizes text, runs the model, returns raw tensors."""
from typing import Dict

from .base import GenericTensor, Pipeline


# NOTE(review): identifiers in this module look machine-mangled — the class and
# every method share the name ``__lowercase``, several parameters are all named
# ``A`` (a duplicate-argument SyntaxError in Python), and the bodies reference
# names such as ``tokenize_kwargs`` / ``model_inputs`` that are never bound.
# Code is left byte-identical; only comments were added.
class __lowercase ( __lowerCamelCase ):
    # Presumably ``_sanitize_parameters``: splits pipeline kwargs into
    # (preprocess, forward, postprocess) parameter dicts — TODO confirm
    # against the base Pipeline contract.
    def __lowercase ( self : int ,A : Dict=None ,A : Optional[Any]=None ,A : Any=None ,**A : Dict ):
        '''Split call kwargs into per-stage parameter dicts.'''
        if tokenize_kwargs is None:
            UpperCAmelCase__ : List[Any] = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                # `truncation` may be given directly OR inside tokenize_kwargs, not both.
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
            UpperCAmelCase__ : List[str] = truncation
        UpperCAmelCase__ : Union[str, Any] = tokenize_kwargs
        UpperCAmelCase__ : List[Any] = {}
        if return_tensors is not None:
            UpperCAmelCase__ : Union[str, Any] = return_tensors
        return preprocess_params, {}, postprocess_params

    # Preprocess step: tokenize the input string into framework tensors.
    def __lowercase ( self : Optional[int] ,A : str ,**A : Dict ):
        '''Tokenize one input string with the pipeline's tokenizer.'''
        UpperCAmelCase__ : Dict = self.framework
        UpperCAmelCase__ : Optional[int] = self.tokenizer(A ,return_tensors=A ,**A )
        return model_inputs

    # Forward step: run the model on the tokenized inputs.
    def __lowercase ( self : Optional[int] ,A : int ):
        '''Run the model on the preprocessed inputs.'''
        UpperCAmelCase__ : str = self.model(**A )
        return model_outputs

    # Postprocess step: return the first output tensor, optionally as nested lists.
    def __lowercase ( self : Union[str, Any] ,A : Dict ,A : Dict=False ):
        '''Extract the first output tensor; convert to lists unless raw tensors requested.'''
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self : Union[str, Any] ,*A : Tuple ,**A : Optional[int] ):
        '''Delegate to the base Pipeline call machinery.'''
        return super().__call__(*A ,**A )
65
"""Utilities for RAG/seq2seq fine-tuning: line-based dataset, batching, metrics, and misc helpers."""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, TaTokenizer


# NOTE(review): identifiers in this module look machine-mangled — every
# module-level function is named ``lowerCAmelCase`` (each definition shadows
# the previous one), locals are assigned to ``UpperCAmelCase__`` but read back
# under other names (``line``, ``tokenizer``, ``batch`` ...), and calls such as
# ``encode_line`` / ``trim_batch`` / ``normalize_answer`` reference names that
# are never defined under those spellings. Code is left byte-identical; only
# comments were added.


# Presumably ``encode_line``: tokenize a single text line to fixed-length tensors.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase="pt" ):
    '''Tokenize one line, padding to max_length when requested.'''
    # BART-style tokenizers need add_prefix_space for lines not starting with a space.
    UpperCAmelCase__ : Optional[int] = {"""add_prefix_space""": True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(""" """ ) else {}
    UpperCAmelCase__ : List[str] = padding_side
    return tokenizer(
        [line] , max_length=__UpperCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , )


# Presumably ``trim_batch``: drop padding-only columns from a batch of ids.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ):
    '''Remove columns that contain only the pad token across the whole batch.'''
    # Keep a column if ANY row has a non-pad token in it.
    UpperCAmelCase__ : str = input_ids.ne(__UpperCamelCase ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


# Line-based seq2seq dataset: reads <type_path>.source / <type_path>.target files lazily
# via ``linecache`` so large files are never fully loaded.
class __lowercase ( __lowerCamelCase ):
    def __init__( self : Tuple ,A : List[Any] ,A : Union[str, Any] ,A : Any ,A : Optional[int] ,A : Union[str, Any]="train" ,A : Tuple=None ,A : Union[str, Any]=None ,A : Tuple=None ,A : int="" ,):
        '''Record file paths, per-line lengths and tokenization settings.'''
        super().__init__()
        UpperCAmelCase__ : Optional[Any] = Path(A ).joinpath(type_path + """.source""" )
        UpperCAmelCase__ : List[str] = Path(A ).joinpath(type_path + """.target""" )
        UpperCAmelCase__ : Dict = self.get_char_lens(self.src_file )
        UpperCAmelCase__ : int = max_source_length
        UpperCAmelCase__ : List[str] = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        UpperCAmelCase__ : Dict = tokenizer
        UpperCAmelCase__ : str = prefix
        if n_obs is not None:
            # Optionally restrict the dataset to the first n_obs examples.
            UpperCAmelCase__ : int = self.src_lens[:n_obs]
        UpperCAmelCase__ : Any = src_lang
        UpperCAmelCase__ : Any = tgt_lang

    def __len__( self : Optional[Any] ):
        '''Number of source lines (possibly truncated to n_obs).'''
        return len(self.src_lens )

    def __getitem__( self : Union[str, Any] ,A : Optional[Any] ):
        '''Tokenize and return the (source, target) pair for one index.'''
        UpperCAmelCase__ : Optional[Any] = index + 1  # linecache starts at 1
        UpperCAmelCase__ : Tuple = self.prefix + linecache.getline(str(self.src_file ) ,A ).rstrip("""\n""" )
        UpperCAmelCase__ : Dict = linecache.getline(str(self.tgt_file ) ,A ).rstrip("""\n""" )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,A ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers wrap separate question-encoder / generator tokenizers.
        UpperCAmelCase__ : str = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,A ) else self.tokenizer
        )
        UpperCAmelCase__ : Tuple = self.tokenizer.generator if isinstance(self.tokenizer ,A ) else self.tokenizer
        UpperCAmelCase__ : Tuple = encode_line(A ,A ,self.max_source_length ,"""right""" )
        UpperCAmelCase__ : Dict = encode_line(A ,A ,self.max_target_length ,"""right""" )
        UpperCAmelCase__ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
        UpperCAmelCase__ : List[str] = target_inputs["""input_ids"""].squeeze()
        UpperCAmelCase__ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def __lowercase ( A : int ):
        '''Per-line character lengths of a file (used to detect empty lines).'''
        return [len(A ) for x in Path(A ).open().readlines()]

    # Collate function: stack examples, trim shared padding, return one batch dict.
    def __lowercase ( self : List[Any] ,A : Any ):
        '''Stack a list of examples into padded, trimmed batch tensors.'''
        UpperCAmelCase__ : int = torch.stack([x["""input_ids"""] for x in batch] )
        UpperCAmelCase__ : Union[str, Any] = torch.stack([x["""attention_mask"""] for x in batch] )
        UpperCAmelCase__ : Any = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        # Decoder side uses the generator tokenizer's pad id for RAG models.
        UpperCAmelCase__ : List[Any] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,A )
            else self.tokenizer.pad_token_id
        )
        UpperCAmelCase__ : Any = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,A )
            else self.tokenizer.pad_token_id
        )
        UpperCAmelCase__ : str = trim_batch(A ,A )
        UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = trim_batch(A ,A ,attention_mask=A )
        UpperCAmelCase__ : List[str] = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch


__UpperCAmelCase = getLogger(__name__)


# Presumably ``flatten_list``: flatten one level of nesting.
def lowerCAmelCase ( __UpperCamelCase ):
    '''Flatten a list of lists into a single list.'''
    return list(itertools.chain.from_iterable(__UpperCamelCase ) )


# Presumably ``save_git_info``: dump current repo state next to training outputs.
def lowerCAmelCase ( __UpperCamelCase ):
    '''Save git repo info as git_log.json in the given folder.'''
    UpperCAmelCase__ : Dict = get_git_info()
    save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , """git_log.json""" ) )


# Presumably ``save_json``.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=4 , **__UpperCamelCase ):
    '''Serialize ``content`` to ``path`` as indented JSON.'''
    with open(__UpperCamelCase , """w""" ) as f:
        json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase )


# Presumably ``load_json``.
def lowerCAmelCase ( __UpperCamelCase ):
    '''Load and return the JSON content of ``path``.'''
    with open(__UpperCamelCase ) as f:
        return json.load(__UpperCamelCase )


# Presumably ``get_git_info``: snapshot of the current git checkout.
def lowerCAmelCase ( ):
    '''Return repo id, commit sha, branch name and hostname.'''
    UpperCAmelCase__ : Union[str, Any] = git.Repo(search_parent_directories=__UpperCamelCase )
    UpperCAmelCase__ : List[str] = {
        """repo_id""": str(__UpperCamelCase ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos


# Presumably ``lmap``: eager map returning a list.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    '''Apply ``f`` over ``x`` and materialize the result as a list.'''
    return list(map(__UpperCamelCase , __UpperCamelCase ) )


# Presumably ``pickle_save``.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    '''Pickle ``obj`` to ``path`` in binary mode.'''
    with open(__UpperCamelCase , """wb""" ) as f:
        return pickle.dump(__UpperCamelCase , __UpperCamelCase )


# Presumably ``normalize_answer``: SQuAD-style answer normalization
# (lowercase, strip punctuation/articles, collapse whitespace).
def lowerCAmelCase ( __UpperCamelCase ):
    '''Normalize a text answer for metric comparison.'''
    def remove_articles(__UpperCamelCase ):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , __UpperCamelCase )

    def white_space_fix(__UpperCamelCase ):
        return " ".join(text.split() )

    def remove_punc(__UpperCamelCase ):
        UpperCAmelCase__ : List[Any] = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(__UpperCamelCase ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )


# Presumably ``f1_score``: token-level F1 between prediction and gold answer.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    '''Token-overlap F1 between two normalized strings.'''
    UpperCAmelCase__ : Union[str, Any] = normalize_answer(__UpperCamelCase ).split()
    UpperCAmelCase__ : Dict = normalize_answer(__UpperCamelCase ).split()
    UpperCAmelCase__ : int = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase )
    UpperCAmelCase__ : List[str] = sum(common.values() )
    if num_same == 0:
        return 0
    UpperCAmelCase__ : str = 1.0 * num_same / len(__UpperCamelCase )
    UpperCAmelCase__ : Optional[int] = 1.0 * num_same / len(__UpperCamelCase )
    UpperCAmelCase__ : Tuple = (2 * precision * recall) / (precision + recall)
    return fa


# Presumably ``exact_match_score``.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    '''True iff the normalized prediction equals the normalized gold answer.'''
    return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase )


# Presumably ``calculate_exact_match`` over paired lists.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    '''Average exact-match over aligned hypothesis/prediction lists.'''
    assert len(__UpperCamelCase ) == len(__UpperCamelCase )
    UpperCAmelCase__ : Union[str, Any] = 0
    for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ):
        em += exact_match_score(__UpperCamelCase , __UpperCamelCase )
    if len(__UpperCamelCase ) > 0:
        em /= len(__UpperCamelCase )
    return {"em": em}


# Presumably ``is_rag_model``: dispatch on the model-name prefix.
def lowerCAmelCase ( __UpperCamelCase ):
    '''True when the model prefix denotes a RAG model.'''
    return model_prefix.startswith("""rag""" )


# Presumably ``set_extra_model_params``: copy extra hparams onto the config,
# translating names that differ between model families.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
    '''Move recognized extra params from hparams onto config, renaming as needed.'''
    UpperCAmelCase__ : Optional[int] = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    UpperCAmelCase__ : str = """dropout_rate"""
    for p in extra_params:
        if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
            if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ):
                # Config supports neither spelling: warn and drop the hparam.
                logger.info("""config doesn't have a `{}` attribute""".format(__UpperCamelCase ) )
                delattr(__UpperCamelCase , __UpperCamelCase )
                continue
            UpperCAmelCase__ : Tuple = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p]
            setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
            delattr(__UpperCamelCase , __UpperCamelCase )
    return hparams, config
65
1
"""Convert between Roman numeral strings and integers."""

# Value/symbol pairs ordered largest-first, including the subtractive
# forms (CM, CD, XC, XL, IX, IV) so greedy conversion is correct.
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
# Backward-compatible alias for the previously-published (mangled) constant name.
__UpperCAmelCase = ROMAN


def roman_to_int(roman: str) -> int:
    """Return the integer value of Roman numeral ``roman``.

    Scans left to right; a symbol smaller than its right neighbour is
    subtractive notation (e.g. ``IV`` == 4), otherwise it is additive.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Return the Roman numeral representation of positive integer ``number``.

    Greedily consumes the largest value first; ``divmod`` gives how many
    copies of the current symbol fit and what remains.
    """
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


# Backward-compatible alias: the previously-published (mangled) function name
# resolved to the last definition, i.e. the int -> Roman converter.
lowerCAmelCase = int_to_roman


if __name__ == "__main__":
    import doctest

    doctest.testmod()
65
"""Tests for the Kandinsky 2.2 ControlNet pipeline (fast dummy tests + slow GPU integration test)."""
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyVaaControlnetPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


# NOTE(review): identifiers in this module look machine-mangled — both classes
# are named ``__lowercase``, all class attributes are ``snake_case_`` and every
# method is ``__lowercase`` (later definitions shadow earlier ones); locals are
# assigned to ``UpperCAmelCase__`` but read back under other names. Code is
# left byte-identical; only comments were added.
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
    # Pipeline class under test plus its required/batch/callback parameter lists.
    snake_case_ = KandinskyVaaControlnetPipeline
    snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    snake_case_ = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    snake_case_ = False

    # Presumably ``text_embedder_hidden_size`` — TODO confirm; all property
    # names in this class were lost in the mangling.
    @property
    def __lowercase ( self : Union[str, Any] ):
        '''Dummy hidden size used by the test components.'''
        return 32

    @property
    def __lowercase ( self : int ):
        '''Dummy time-embedding input dimension.'''
        return 32

    @property
    def __lowercase ( self : Dict ):
        '''Alias of the time input dimension.'''
        return self.time_input_dim

    @property
    def __lowercase ( self : Union[str, Any] ):
        '''Time embedding dimension (4x the input dimension).'''
        return self.time_input_dim * 4

    @property
    def __lowercase ( self : Any ):
        '''Dummy cross-attention dimension.'''
        return 100

    # Builds a tiny UNet with the image_hint conditioning used by ControlNet.
    @property
    def __lowercase ( self : Any ):
        '''Construct a seeded dummy UNet2DConditionModel.'''
        torch.manual_seed(0 )
        UpperCAmelCase__ : Tuple = {
            """in_channels""": 8,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image_hint""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        UpperCAmelCase__ : int = UNetaDConditionModel(**A )
        return model

    @property
    def __lowercase ( self : Union[str, Any] ):
        '''Keyword arguments for the dummy VQ (movq) decoder.'''
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def __lowercase ( self : Dict ):
        '''Construct a seeded dummy VQModel.'''
        torch.manual_seed(0 )
        UpperCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs )
        return model

    def __lowercase ( self : Union[str, Any] ):
        '''Assemble the dummy unet/scheduler/movq component dict for the pipeline.'''
        UpperCAmelCase__ : str = self.dummy_unet
        UpperCAmelCase__ : List[Any] = self.dummy_movq
        UpperCAmelCase__ : List[Any] = DDIMScheduler(
            num_train_timesteps=1_000 ,beta_schedule="""linear""" ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=A ,set_alpha_to_one=A ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=A ,)
        UpperCAmelCase__ : Optional[Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def __lowercase ( self : str ,A : Optional[Any] ,A : Any=0 ):
        '''Build seeded dummy pipeline inputs (embeds, hint image, generator).'''
        UpperCAmelCase__ : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A ) ).to(A )
        UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
            A )
        # create hint
        UpperCAmelCase__ : int = floats_tensor((1, 3, 64, 64) ,rng=random.Random(A ) ).to(A )
        # mps does not support device-specific generators.
        if str(A ).startswith("""mps""" ):
            UpperCAmelCase__ : Optional[int] = torch.manual_seed(A )
        else:
            UpperCAmelCase__ : Dict = torch.Generator(device=A ).manual_seed(A )
        UpperCAmelCase__ : Dict = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs

    # Fast CPU smoke test: the pipeline produces the expected 64x64 image and
    # its corner slice matches a recorded reference within 1e-2.
    def __lowercase ( self : List[str] ):
        '''Run the dummy pipeline on CPU and compare against a recorded slice.'''
        UpperCAmelCase__ : Dict = """cpu"""
        UpperCAmelCase__ : List[Any] = self.get_dummy_components()
        UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**A )
        UpperCAmelCase__ : Optional[int] = pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        UpperCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(A ) )
        UpperCAmelCase__ : Tuple = output.images
        UpperCAmelCase__ : Dict = pipe(
            **self.get_dummy_inputs(A ) ,return_dict=A ,)[0]
        UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
        UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : Optional[int] = np.array(
            [0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


# Slow end-to-end integration test: needs CUDA, network access (model weights
# and reference assets from the Hub).
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
    def __lowercase ( self : Union[str, Any] ):
        '''Release GPU memory between tests.'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowercase ( self : int ):
        '''Full prior + controlnet run compared against a recorded fp16 image.'''
        UpperCAmelCase__ : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
        UpperCAmelCase__ : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        # Convert the hint to a normalized CHW float tensor with a batch dim.
        UpperCAmelCase__ : int = torch.from_numpy(np.array(A ) ).float() / 2_5_5.0
        UpperCAmelCase__ : Union[str, Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
        UpperCAmelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
        pipe_prior.to(A )
        UpperCAmelCase__ : List[Any] = KandinskyVaaControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
        UpperCAmelCase__ : int = pipeline.to(A )
        pipeline.set_progress_bar_config(disable=A )
        UpperCAmelCase__ : Optional[Any] = """A robot, 4k photo"""
        UpperCAmelCase__ : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = pipe_prior(
            A ,generator=A ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
        UpperCAmelCase__ : List[str] = torch.Generator(device="""cuda""" ).manual_seed(0 )
        UpperCAmelCase__ : int = pipeline(
            image_embeds=A ,negative_image_embeds=A ,hint=A ,generator=A ,num_inference_steps=100 ,output_type="""np""" ,)
        UpperCAmelCase__ : Any = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(A ,A )
65
1
"""Binary-tree construction and traversal algorithms (DFS orders, BFS, zig-zag)."""
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    # Value stored at this node.
    data: int
    # Child links; ``None`` marks a missing subtree.
    left: Node | None = None
    right: Node | None = None


# Backward-compatible alias for the previous (mangled) class name.
__lowercase = Node


def make_tree() -> Node:
    r"""Build the fixed sample tree used by the demos/tests::

            1
           / \
          2   3
         / \
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree, right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree, root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of levels in the tree; 0 for an empty tree."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a FIFO queue of pending nodes."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Values on one tree level, left to right (level 1 is the root)."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Values on one tree level, right to left (level 1 is the root)."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            # Visit right child first to reverse the within-level order.
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Level-by-level values, alternating left-to-right and right-to-left."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0  # 0 -> next level reads left-to-right, 1 -> right-to-left
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    """Demonstrate every traversal on the sample tree."""
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


# Backward-compatible alias: the previously-published (mangled) name resolved
# to the last definition, i.e. ``main``.
lowerCAmelCase = main


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
65
"""Configuration (and ONNX export configs) for vision encoder-decoder models."""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

__UpperCAmelCase = logging.get_logger(__name__)


# NOTE(review): identifiers in this module look machine-mangled — all four
# classes are named ``__lowercase`` (later definitions shadow earlier ones),
# attributes are ``snake_case_`` and locals are assigned to ``UpperCAmelCase__``
# but read back under other names (``kwargs``, ``encoder_config`` ...).
# Code is left byte-identical; only comments were added.
class __lowercase ( __lowerCamelCase ):
    # Composite config: holds one encoder and one decoder sub-configuration.
    snake_case_ = """vision-encoder-decoder"""
    snake_case_ = True

    def __init__( self : List[Any] ,**A : Union[str, Any] ):
        '''Build the composite config from `encoder` and `decoder` kwargs.'''
        super().__init__(**A )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
        # Pop the sub-config dicts and rebuild them via AutoConfig using their model_type.
        UpperCAmelCase__ : int = kwargs.pop("""encoder""" )
        UpperCAmelCase__ : int = encoder_config.pop("""model_type""" )
        UpperCAmelCase__ : str = kwargs.pop("""decoder""" )
        UpperCAmelCase__ : Dict = decoder_config.pop("""model_type""" )
        UpperCAmelCase__ : List[Any] = AutoConfig.for_model(A ,**A )
        UpperCAmelCase__ : Any = AutoConfig.for_model(A ,**A )
        UpperCAmelCase__ : Union[str, Any] = True

    # Alternate constructor from two already-built configs; forces the decoder
    # into decoder mode with cross-attention.
    @classmethod
    def __lowercase ( cls : List[Any] ,A : PretrainedConfig ,A : PretrainedConfig ,**A : Tuple ):
        '''Create the composite config from separate encoder/decoder configs.'''
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        UpperCAmelCase__ : Union[str, Any] = True
        UpperCAmelCase__ : List[Any] = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**A )

    def __lowercase ( self : Optional[int] ):
        '''Serialize to a plain dict, expanding the nested sub-configs.'''
        UpperCAmelCase__ : List[Any] = copy.deepcopy(self.__dict__ )
        UpperCAmelCase__ : Dict = self.encoder.to_dict()
        UpperCAmelCase__ : Any = self.decoder.to_dict()
        UpperCAmelCase__ : Dict = self.__class__.model_type
        return output


# ONNX config for the vision encoder half.
class __lowercase ( __lowerCamelCase ):
    # Minimum ONNX opset/torch version required for export.
    snake_case_ = version.parse("""1.11""" )

    @property
    def __lowercase ( self : Optional[int] ):
        '''Dynamic-axis mapping for the encoder inputs (NCHW pixel values).'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def __lowercase ( self : List[Any] ):
        '''Numerical tolerance used when validating the exported model.'''
        return 1e-4

    @property
    def __lowercase ( self : List[Any] ):
        '''Dynamic-axis mapping for the encoder outputs.'''
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )


# ONNX config for the text decoder half.
class __lowercase ( __lowerCamelCase ):
    @property
    def __lowercase ( self : Any ):
        '''Dynamic-axis mapping for the decoder inputs.'''
        UpperCAmelCase__ : int = OrderedDict()
        UpperCAmelCase__ : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        UpperCAmelCase__ : Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        UpperCAmelCase__ : List[str] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs

    def __lowercase ( self : Dict ,A : "PreTrainedTokenizerBase" ,A : int = -1 ,A : int = -1 ,A : bool = False ,A : Optional["TensorType"] = None ,):
        '''Generate dummy decoder inputs, replacing input_ids with zeroed encoder states.'''
        import torch

        UpperCAmelCase__ : int = OrderedDict()
        UpperCAmelCase__ : List[Any] = super().generate_dummy_inputs(
            A ,batch_size=A ,seq_length=A ,is_pair=A ,framework=A )
        # Derive the encoder-hidden-state shape from the dummy input_ids shape.
        UpperCAmelCase__ , UpperCAmelCase__ : int = dummy_input["""input_ids"""].shape
        UpperCAmelCase__ : int = (batch, encoder_sequence, self._config.encoder_hidden_size)
        UpperCAmelCase__ : Tuple = dummy_input.pop("""input_ids""" )
        UpperCAmelCase__ : Optional[int] = dummy_input.pop("""attention_mask""" )
        UpperCAmelCase__ : Dict = torch.zeros(A )
        return common_inputs


# Top-level ONNX config that dispatches to the encoder/decoder configs above.
class __lowercase ( __lowerCamelCase ):
    @property
    def __lowercase ( self : str ):
        '''Intentionally empty: the composite model has no single-input mapping.'''
        pass

    def __lowercase ( self : Any ,A : PretrainedConfig ):
        '''ONNX config for exporting the encoder.'''
        return VisionEncoderDecoderEncoderOnnxConfig(A )

    def __lowercase ( self : Dict ,A : PretrainedConfig ,A : PretrainedConfig ,A : str = "default" ):
        '''ONNX config for exporting the decoder (feature inferred from encoder size).'''
        UpperCAmelCase__ : List[str] = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(A ,A )
65
1
"""Tests for the TensorFlow CvT (Convolutional vision Transformer) models."""
from __future__ import annotations

import inspect
import unittest
from math import floor

import numpy as np

from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFCvtForImageClassification, TFCvtModel
    from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# NOTE(review): identifiers in this module look machine-mangled — classes are
# named ``__lowercase``, methods ``__lowercase`` (later definitions shadow
# earlier ones), and locals are assigned to ``UpperCAmelCase__`` but read back
# under other names. Code is left byte-identical; only comments were added.
# NOTE(review): the final test class continues beyond this region; its
# remaining methods are outside the edited span.
class __lowercase ( __lowerCamelCase ):
    # Config tester specialization: checks CvT-specific config attributes.
    def __lowercase ( self : Optional[int] ):
        '''Assert the config exposes embed_dim and num_heads.'''
        UpperCAmelCase__ : Union[str, Any] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(A ,"""embed_dim""" ) )
        self.parent.assertTrue(hasattr(A ,"""num_heads""" ) )


# Model tester: builds small configs/inputs and runs shape checks on the models.
class __lowercase :
    def __init__( self : Optional[int] ,A : Tuple ,A : Any=13 ,A : str=64 ,A : List[str]=3 ,A : int=[16, 48, 96] ,A : Optional[Any]=[1, 3, 6] ,A : Any=[1, 2, 10] ,A : List[str]=[7, 3, 3] ,A : str=[4, 2, 2] ,A : str=[2, 1, 1] ,A : Union[str, Any]=[2, 2, 2] ,A : List[str]=[False, False, True] ,A : Dict=[0.0, 0.0, 0.0] ,A : Tuple=0.0_2 ,A : List[Any]=1e-12 ,A : Dict=True ,A : Optional[Any]=True ,A : Optional[int]=2 ,):
        '''Store the per-stage hyperparameters for the tiny test model.'''
        UpperCAmelCase__ : List[str] = parent
        UpperCAmelCase__ : List[str] = batch_size
        UpperCAmelCase__ : int = image_size
        UpperCAmelCase__ : List[str] = patch_sizes
        UpperCAmelCase__ : int = patch_stride
        UpperCAmelCase__ : Tuple = patch_padding
        UpperCAmelCase__ : Tuple = is_training
        UpperCAmelCase__ : Dict = use_labels
        UpperCAmelCase__ : Dict = num_labels
        UpperCAmelCase__ : Tuple = num_channels
        UpperCAmelCase__ : Any = embed_dim
        UpperCAmelCase__ : Tuple = num_heads
        UpperCAmelCase__ : Union[str, Any] = stride_kv
        UpperCAmelCase__ : List[str] = depth
        UpperCAmelCase__ : Optional[Any] = cls_token
        UpperCAmelCase__ : List[Any] = attention_drop_rate
        UpperCAmelCase__ : Any = initializer_range
        UpperCAmelCase__ : Optional[int] = layer_norm_eps

    def __lowercase ( self : Optional[Any] ):
        '''Create random pixel values (and labels when enabled) plus a config.'''
        UpperCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase__ : Any = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_labels )
        UpperCAmelCase__ : Dict = self.get_config()
        return config, pixel_values, labels

    def __lowercase ( self : Optional[int] ):
        '''Build a CvtConfig from the stored hyperparameters.'''
        return CvtConfig(
            image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,)

    # Shape check for the base model: the spatial dims shrink per stage
    # according to the conv patch-embedding formula.
    def __lowercase ( self : List[str] ,A : Any ,A : List[Any] ,A : Tuple ):
        '''Run TFCvtModel and verify the last_hidden_state shape.'''
        UpperCAmelCase__ : Dict = TFCvtModel(config=A )
        UpperCAmelCase__ : Dict = model(A ,training=A )
        UpperCAmelCase__ : List[str] = (self.image_size, self.image_size)
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            # Standard conv output-size formula: floor((in + 2*pad - kernel)/stride) + 1.
            UpperCAmelCase__ : Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            UpperCAmelCase__ : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) )

    def __lowercase ( self : int ,A : List[Any] ,A : int ,A : Union[str, Any] ):
        '''Run TFCvtForImageClassification and verify the logits shape.'''
        UpperCAmelCase__ : Union[str, Any] = self.num_labels
        UpperCAmelCase__ : List[Any] = TFCvtForImageClassification(A )
        UpperCAmelCase__ : Union[str, Any] = model(A ,labels=A ,training=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def __lowercase ( self : Optional[Any] ):
        '''Package config and inputs into the dict shape the common tests expect.'''
        UpperCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
        UpperCAmelCase__ : List[str] = {"""pixel_values""": pixel_values}
        return config, inputs_dict


# Model test-case class (continues past the edited region).
@require_tf
class __lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    snake_case_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    snake_case_ = (
        {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    def __lowercase ( self : str ):
        '''Set up the model tester and the config tester.'''
        UpperCAmelCase__ : Tuple = TFCvtModelTester(self )
        UpperCAmelCase__ : Union[str, Any] = TFCvtConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )

    def __lowercase ( self : List[str] ):
        '''Run the standard config round-trip checks.'''
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="""Cvt does not output attentions""" )
    def __lowercase ( self : List[str] ):
        '''Skipped: CvT has no attention outputs.'''
        pass

    @unittest.skip(reason="""Cvt does not use inputs_embeds""" )
    def __lowercase ( self : Dict ):
        '''Skipped: CvT takes pixel values, not input embeddings.'''
        pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def __lowercase ( self : int ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,reason="""TF does not support backprop for grouped convolutions on CPU.""" ,) def __lowercase ( self : str ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,reason="""TF does not support backprop for grouped convolutions on CPU.""" ,) @slow def __lowercase ( self : str ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = tf.keras.mixed_precision.Policy("""mixed_float16""" ) tf.keras.mixed_precision.set_global_policy(A ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("""float32""" ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(A ) UpperCAmelCase__ : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Optional[int] = [*signature.parameters.keys()] UpperCAmelCase__ : Dict = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,A ) def __lowercase ( self : Tuple ): '''simple docstring''' def check_hidden_states_output(A : List[Any] ,A : Tuple ,A : int ): UpperCAmelCase__ : Any = model_class(A ) UpperCAmelCase__ : Any = model(**self._prepare_for_class(A ,A ) ) UpperCAmelCase__ : List[str] = outputs.hidden_states UpperCAmelCase__ : int = len(self.model_tester.depth ) self.assertEqual(len(A ) ,A ) # verify the first hidden states 
(first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) ,[ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] ,) UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = True check_hidden_states_output(A ,A ,A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : List[str] = True check_hidden_states_output(A ,A ,A ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) @slow def __lowercase ( self : Tuple ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[Any] = TFCvtModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __lowercase ( unittest.TestCase ): @cached_property def __lowercase ( self : str ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Optional[Any] = prepare_img() UpperCAmelCase__ : Optional[int] = image_processor(images=A ,return_tensors="""tf""" ) # forward pass UpperCAmelCase__ : Union[str, Any] = 
model(**A ) # verify the logits UpperCAmelCase__ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape ,A ) UpperCAmelCase__ : Any = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,A ,atol=1e-4 ) )
65
"""simple docstring""" import requests def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Tuple = {"""Content-Type""": """application/json"""} UpperCAmelCase__ : Optional[Any] = requests.post(__UpperCamelCase , json={"""text""": message_body} , headers=__UpperCamelCase ) if response.status_code != 200: UpperCAmelCase__ : Any = ( """Request to slack returned an error """ F"{response.status_code}, the response is:\n{response.text}" ) raise ValueError(__UpperCamelCase ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
65
1
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCAmelCase = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self : str ,A : str ,A : int=7 ,A : Tuple=3 ,A : Dict=18 ,A : Dict=30 ,A : Optional[int]=400 ,A : Dict=None ,A : str=True ,A : Any=True ,A : str=None ,): '''simple docstring''' UpperCAmelCase__ : str = size if size is not None else {"""height""": 20, """width""": 20} UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : List[str] = image_size UpperCAmelCase__ : Tuple = min_resolution UpperCAmelCase__ : Any = max_resolution UpperCAmelCase__ : Any = size UpperCAmelCase__ : int = do_normalize UpperCAmelCase__ : Any = do_convert_rgb UpperCAmelCase__ : Optional[int] = [512, 1_024, 2_048, 4_096] UpperCAmelCase__ : List[str] = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg""" UpperCAmelCase__ : List[str] = Image.open(requests.get(A ,stream=A ).raw ).convert("""RGB""" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) 
@require_torch @require_vision class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = PixaStructImageProcessor if is_vision_available() else None def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = PixaStructImageProcessingTester(self ) @property def __lowercase ( self : List[Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""do_normalize""" ) ) self.assertTrue(hasattr(A ,"""do_convert_rgb""" ) ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Tuple = 2_048 UpperCAmelCase__ : Union[str, Any] = image_processor(A ,return_tensors="""pt""" ,max_patches=A ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() ,torch.tensor(0.0_6_0_6 ) ,atol=1e-3 ,rtol=1e-3 ) ) def __lowercase ( self : Tuple ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase__ : Optional[Any] = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Union[str, Any] = image_processor( image_inputs[0] ,return_tensors="""pt""" ,max_patches=A ).flattened_patches self.assertEqual( encoded_images.shape ,(1, 
max_patch, expected_hidden_dim) ,) # Test batched UpperCAmelCase__ : Any = image_processor( A ,return_tensors="""pt""" ,max_patches=A ).flattened_patches self.assertEqual( encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,) def __lowercase ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase__ : Tuple = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : str = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(A ): UpperCAmelCase__ : Any = image_processor( image_inputs[0] ,return_tensors="""pt""" ,max_patches=A ).flattened_patches UpperCAmelCase__ : List[str] = """Hello""" UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] ,return_tensors="""pt""" ,max_patches=A ,header_text=A ).flattened_patches self.assertEqual( encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,) # Test batched UpperCAmelCase__ : Optional[int] = image_processor( A ,return_tensors="""pt""" ,max_patches=A ,header_text=A ).flattened_patches self.assertEqual( encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,) def __lowercase ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: 
self.assertIsInstance(A ,np.ndarray ) UpperCAmelCase__ : Any = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] ,return_tensors="""pt""" ,max_patches=A ).flattened_patches self.assertEqual( encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,) # Test batched UpperCAmelCase__ : Union[str, Any] = image_processor( A ,return_tensors="""pt""" ,max_patches=A ).flattened_patches self.assertEqual( encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,) def __lowercase ( self : Tuple ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input UpperCAmelCase__ : Tuple = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] ,return_tensors="""pt""" ,max_patches=A ).flattened_patches self.assertEqual( encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,) # Test batched UpperCAmelCase__ : Union[str, Any] = image_processor( A ,return_tensors="""pt""" ,max_patches=A ).flattened_patches self.assertEqual( encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires 
`torch>=1.11.0`.""" , ) @require_torch @require_vision class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = PixaStructImageProcessor if is_vision_available() else None def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = PixaStructImageProcessingTester(self ,num_channels=4 ) UpperCAmelCase__ : List[Any] = 3 @property def __lowercase ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""do_normalize""" ) ) self.assertTrue(hasattr(A ,"""do_convert_rgb""" ) ) def __lowercase ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[str] = image_processor( image_inputs[0] ,return_tensors="""pt""" ,max_patches=A ).flattened_patches self.assertEqual( encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,) # Test batched UpperCAmelCase__ : Optional[Any] = image_processor( A ,return_tensors="""pt""" ,max_patches=A ).flattened_patches self.assertEqual( encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
65
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = CTRLTokenizer snake_case_ = False snake_case_ = False def __lowercase ( self : List[str] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : List[Any] = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""] UpperCAmelCase__ : Optional[int] = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""] UpperCAmelCase__ : int = {"""unk_token""": """<unk>"""} UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def __lowercase ( self : int ,**A : Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : List[Any] ,A : Any ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = """adapt react readapt apt""" UpperCAmelCase__ : Any = """adapt react readapt apt""" return input_text, output_text def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) UpperCAmelCase__ : Tuple = """adapt react readapt apt""" UpperCAmelCase__ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split() UpperCAmelCase__ : Dict = 
tokenizer.tokenize(A ) self.assertListEqual(A ,A ) UpperCAmelCase__ : Any = tokens + [tokenizer.unk_token] UpperCAmelCase__ : Dict = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,A )
65
1
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowerCAmelCase ( ): '''simple docstring''' raise RuntimeError("""CUDA out of memory.""" ) class __lowercase ( nn.Module ): def __init__( self : Optional[int] ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Union[str, Any] = nn.Linear(3 ,4 ) UpperCAmelCase__ : List[Any] = nn.BatchNormad(4 ) UpperCAmelCase__ : Any = nn.Linear(4 ,5 ) def __lowercase ( self : Tuple ,A : Any ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(A ) ) ) class __lowercase ( unittest.TestCase ): def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A : Tuple ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(A ,[128, 64, 32, 16, 8] ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A : int ,A : List[str] ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga UpperCAmelCase__ , UpperCAmelCase__ : Dict = mock_training_loop_function("""hello""" ) self.assertListEqual(A ,[128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] ,[8, """hello"""] ) def __lowercase ( self : Optional[int] ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(A : str ): pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" ,cm.exception.args[0] ) def __lowercase ( self : List[str] ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16 
) def mock_training_loop_function(A : Tuple ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" ,cm.exception.args[0] ) def __lowercase ( self : List[Any] ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A : str ,A : Tuple ,A : int ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(A ) as cm: mock_training_loop_function(128 ,"""hello""" ,"""world""" ) self.assertIn("""Batch size was passed into `f`""" ,cm.exception.args[0] ) self.assertIn("""`f(arg1='hello', arg2='world')""" ,cm.exception.args[0] ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A : Tuple ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" ,cm.exception.args[0] ) @require_cuda def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = torch.cuda.memory_allocated() UpperCAmelCase__ : List[str] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() ,A ) UpperCAmelCase__ : Optional[Any] = release_memory(A ) self.assertEqual(torch.cuda.memory_allocated() ,A )
65
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { 'configuration_bridgetower': [ 'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig', ], 'processing_bridgetower': ['BridgeTowerProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST', 'BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
65
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer __UpperCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __UpperCAmelCase = { 'vocab_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt' ), 'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt', 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json' ), 'google/electra-base-generator': ( 'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json' ), 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json' ), }, } __UpperCAmelCase = { 'google/electra-small-generator': 512, 'google/electra-base-generator': 512, 'google/electra-large-generator': 512, 
'google/electra-small-discriminator': 512, 'google/electra-base-discriminator': 512, 'google/electra-large-discriminator': 512, } __UpperCAmelCase = { 'google/electra-small-generator': {'do_lower_case': True}, 'google/electra-base-generator': {'do_lower_case': True}, 'google/electra-large-generator': {'do_lower_case': True}, 'google/electra-small-discriminator': {'do_lower_case': True}, 'google/electra-base-discriminator': {'do_lower_case': True}, 'google/electra-large-discriminator': {'do_lower_case': True}, } class __lowercase ( __lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_INIT_CONFIGURATION snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ElectraTokenizer def __init__( self : int ,A : Union[str, Any]=None ,A : Tuple=None ,A : Optional[Any]=True ,A : List[Any]="[UNK]" ,A : Any="[SEP]" ,A : Dict="[PAD]" ,A : Any="[CLS]" ,A : int="[MASK]" ,A : int=True ,A : List[Any]=None ,**A : Dict ,): '''simple docstring''' super().__init__( A ,tokenizer_file=A ,do_lower_case=A ,unk_token=A ,sep_token=A ,pad_token=A ,cls_token=A ,mask_token=A ,tokenize_chinese_chars=A ,strip_accents=A ,**A ,) UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,A ) != do_lower_case or normalizer_state.get("""strip_accents""" ,A ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,A ) != tokenize_chinese_chars ): UpperCAmelCase__ : Any = getattr(A ,normalizer_state.pop("""type""" ) ) UpperCAmelCase__ : int = do_lower_case UpperCAmelCase__ : List[Any] = strip_accents UpperCAmelCase__ : Any = tokenize_chinese_chars UpperCAmelCase__ : Tuple = normalizer_class(**A ) UpperCAmelCase__ : Union[str, Any] = do_lower_case def __lowercase ( self : List[Any] ,A : Dict ,A : str=None ): '''simple docstring''' UpperCAmelCase__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += 
token_ids_a + [self.sep_token_id] return output def __lowercase ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase__ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowercase ( self : int ,A : str ,A : Optional[str] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self._tokenizer.model.save(A ,name=A ) return tuple(A )
65
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """is_longer"""] def __init__( self : str ,A : Union[str, Any]=64 ,A : Tuple=48_000 ,A : Dict=480 ,A : List[str]=10 ,A : str=1_024 ,A : Any=0.0 ,A : Optional[int]=False ,A : float = 0 ,A : float = 14_000 ,A : int = None ,A : str = "fusion" ,A : str = "repeatpad" ,**A : List[Any] ,): '''simple docstring''' super().__init__( feature_size=A ,sampling_rate=A ,padding_value=A ,return_attention_mask=A ,**A ,) UpperCAmelCase__ : List[Any] = top_db UpperCAmelCase__ : Union[str, Any] = truncation UpperCAmelCase__ : Optional[int] = padding UpperCAmelCase__ : List[Any] = fft_window_size UpperCAmelCase__ : Optional[Any] = (fft_window_size >> 1) + 1 UpperCAmelCase__ : Any = hop_length UpperCAmelCase__ : List[str] = max_length_s UpperCAmelCase__ : List[Any] = max_length_s * sampling_rate UpperCAmelCase__ : List[Any] = sampling_rate UpperCAmelCase__ : Optional[int] = frequency_min UpperCAmelCase__ : Tuple = frequency_max UpperCAmelCase__ : List[str] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm=A ,mel_scale="""htk""" ,) UpperCAmelCase__ : str = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm="""slaney""" ,mel_scale="""slaney""" ,) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Tuple = self.__class__.__name__ if "mel_filters" in 
output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __lowercase ( self : List[str] ,A : np.array ,A : Optional[np.array] = None ): '''simple docstring''' UpperCAmelCase__ : Dict = spectrogram( A ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=A ,log_mel="""dB""" ,) return log_mel_spectrogram.T def __lowercase ( self : Optional[Any] ,A : Union[str, Any] ,A : int ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : List[str] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : int = [0] # randomly choose index for each part UpperCAmelCase__ : Tuple = np.random.choice(ranges[0] ) UpperCAmelCase__ : Tuple = np.random.choice(ranges[1] ) UpperCAmelCase__ : str = np.random.choice(ranges[2] ) UpperCAmelCase__ : List[str] = mel[idx_front : idx_front + chunk_frames, :] UpperCAmelCase__ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] UpperCAmelCase__ : Dict = mel[idx_back : idx_back + chunk_frames, :] UpperCAmelCase__ : Optional[Any] = torch.tensor(mel[None, None, :] ) UpperCAmelCase__ : int = torch.nn.functional.interpolate( A ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=A ) UpperCAmelCase__ : Dict = mel_shrink[0][0].numpy() UpperCAmelCase__ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def __lowercase ( self : Any ,A : np.array ,A : Optional[int] ,A : Any ,A : Tuple ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": UpperCAmelCase__ : int = True # random crop to max_length (for compatibility) -> this should be handled by self.pad 
UpperCAmelCase__ : str = len(A ) - max_length UpperCAmelCase__ : Optional[Any] = np.random.randint(0 ,overflow + 1 ) UpperCAmelCase__ : Optional[int] = waveform[idx : idx + max_length] UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": UpperCAmelCase__ : Tuple = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCAmelCase__ : int = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCAmelCase__ : List[Any] = np.stack([mel, mel, mel, mel] ,axis=0 ) UpperCAmelCase__ : Any = False else: UpperCAmelCase__ : Union[str, Any] = self._random_mel_fusion(A ,A ,A ) UpperCAmelCase__ : List[str] = True else: raise NotImplementedError(f"data_truncating {truncation} not implemented" ) else: UpperCAmelCase__ : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCAmelCase__ : str = int(max_length / len(A ) ) UpperCAmelCase__ : int = np.stack(np.tile(A ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": UpperCAmelCase__ : List[Any] = int(max_length / len(A ) ) UpperCAmelCase__ : str = np.stack(np.tile(A ,A ) ) UpperCAmelCase__ : Optional[Any] = np.pad(A ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": UpperCAmelCase__ : int = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : str = None ,A : Optional[str] = None ,A : Optional[int] = None ,A : Optional[int] = None ,A : Optional[Union[str, TensorType]] = None ,**A : List[str] ,): '''simple docstring''' UpperCAmelCase__ : Optional[int] = truncation if truncation is not None else self.truncation UpperCAmelCase__ : Dict = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[int] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : List[str] = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Any = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [np.asarray(A )] # convert to mel spectrogram, truncate and pad if needed. UpperCAmelCase__ : Tuple = [ self._get_input_mel(A ,max_length if max_length else self.nb_max_samples ,A ,A ) for waveform in raw_speech ] UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Tuple = [] for mel, longer in padded_inputs: input_mel.append(A ) is_longer.append(A ) if truncation == "fusion" and sum(A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCAmelCase__ : List[str] = np.random.randint(0 ,len(A ) ) UpperCAmelCase__ : int = True if isinstance(input_mel[0] ,A ): UpperCAmelCase__ : Tuple = [np.asarray(A ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool UpperCAmelCase__ : List[str] = [[longer] for longer in is_longer] UpperCAmelCase__ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} UpperCAmelCase__ : str = BatchFeature(A ) if return_tensors is not None: UpperCAmelCase__ : int = input_features.convert_to_tensors(A ) return input_features
65
1
"""Accelerate example: fine-tune BERT on GLUE/MRPC while automatically
recovering from CUDA out-of-memory errors via `find_executable_batch_size`.

Restored from a machine-mangled chunk in which every assignment target was
rewritten to `UpperCAmelCase__` (clobbering a single name) while later code
referenced the real identifiers, so the script could not run.
"""
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/validation dataloaders for GLUE MRPC.

    Args:
        accelerator: the `Accelerator` driving this run (used to coordinate
            preprocessing across processes and pick padding strategies).
        batch_size: per-device training batch size.

    Returns:
        `(train_dataloader, eval_dataloader)`.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Run the full training/evaluation loop described by `config`/`args`.

    The inner loop is wrapped with `find_executable_batch_size` so that on an
    out-of-memory error the batch size is halved and the loop restarted.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation otherwise training will not work on TPU.
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave
        # them to the prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
65
"""Tests for the Donut image processor.

Restored from a machine-mangled chunk: the tester class was declared as
`__lowercase` yet instantiated as `DonutImageProcessingTester`, constructor
parameters were all renamed `A` (duplicate-parameter syntax error), and every
attribute was assigned to a single clobbered name instead of `self.*`.
"""
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build `DonutImageProcessor` instances in tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default is non-square on purpose so height/width mix-ups are caught.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """All configuration attributes must exist on the processor."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
65
1
"""Tests for the TVLT feature extractor.

Restored from a machine-mangled chunk: `global_rng`/`floats_list` and both
test classes had their identifiers obfuscated (duplicate `A` parameters,
attributes never written to `self`), leaving the module unrunnable. Also
replaces the deprecated `assertEquals` alias with `assertEqual`.
"""
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats in [0, scale) with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    """Holds the hyper-parameters used to build `TvltFeatureExtractor` instances in tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        spectrogram_length=2_048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44_100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths so a batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """Return the kwargs dict used to instantiate the feature extractor."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Generate a batch of raw speech inputs, equal-length or increasing in size."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        """All configuration attributes must exist on the extractor."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # mel_filters are float arrays: compare numerically, then the rest exactly.
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44_100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        """Load `num_samples` decoded audio arrays from the dummy LibriSpeech dataset."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
65
"""Open-Llama model configuration.

Restored from a machine-mangled chunk: every `__init__` parameter was renamed
`A` (duplicate-parameter syntax error) and all attributes were assigned to a
clobbered local instead of `self`, while `_rope_scaling_validation` read the
real `self.*` names.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """Configuration for an Open-Llama model.

    Instantiating a configuration with the defaults yields a configuration
    similar to the s-JoL/Open-Llama-V1 architecture. All arguments are stored
    as attributes of the same name; token ids and `tie_word_embeddings` are
    forwarded to `PretrainedConfig.__init__`.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE: the misspelled kwarg "use_memorry_efficient_attention" is kept
        # on purpose for backward compatibility with configs saved under it.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration.

        Raises:
            ValueError: if `rope_scaling` is not a two-field dict with a valid
                `type` ("linear" or "dynamic") and a float `factor` > 1.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
65
1
"""simple docstring""" from __future__ import annotations from collections.abc import Callable def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 100 , ): '''simple docstring''' UpperCAmelCase__ : List[Any] = x_start UpperCAmelCase__ : List[Any] = fnc(__UpperCamelCase ) UpperCAmelCase__ : Any = 0.0 for _ in range(__UpperCamelCase ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase__ : str = (x_end - x_start) / steps + xa UpperCAmelCase__ : Optional[int] = fnc(__UpperCamelCase ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase__ : int = xa UpperCAmelCase__ : Dict = fxa return area if __name__ == "__main__": def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return x**3 + x**2 print('f(x) = x^3 + x^2') print('The area between the curve, x = -5, x = 5 and the x axis is:') __UpperCAmelCase = 10 while i <= 10_0000: print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}") i *= 10
65
"""simple docstring""" from collections.abc import Callable class __lowercase : def __init__( self : Tuple ,A : Callable | None = None ): '''simple docstring''' # Stores actual heap items. UpperCAmelCase__ : list = [] # Stores indexes of each item for supporting updates and deletion. UpperCAmelCase__ : dict = {} # Stores current size of heap. UpperCAmelCase__ : Any = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. UpperCAmelCase__ : int = key or (lambda A : x) def __lowercase ( self : Union[str, Any] ,A : int ): '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def __lowercase ( self : Tuple ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowercase ( self : Any ,A : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowercase ( self : List[Any] ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. 
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.arr[j], self.arr[i] def __lowercase ( self : Optional[int] ,A : int ,A : int ): '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def __lowercase ( self : Optional[int] ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._left(A ) UpperCAmelCase__ : Dict = self._right(A ) UpperCAmelCase__ : Optional[int] = i if left is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = left if right is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = right return valid_parent def __lowercase ( self : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._parent(A ) while parent is not None and not self._cmp(A ,A ): self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : int = parent, self._parent(A ) def __lowercase ( self : str ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = self._get_valid_parent(A ) while valid_parent != index: self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = valid_parent, self._get_valid_parent(A ) def __lowercase ( self : Optional[Any] ,A : int ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Tuple = self.pos_map[item] UpperCAmelCase__ : Dict = [item, self.key(A )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : List[Any] ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Any = self.pos_map[item] del self.pos_map[item] UpperCAmelCase__ : Dict = self.arr[self.size - 1] UpperCAmelCase__ : List[Any] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : str ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : Dict = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(A )] ) else: UpperCAmelCase__ : List[str] = [item, self.key(A )] UpperCAmelCase__ : Union[str, Any] = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowercase ( self : str ): '''simple docstring''' return self.arr[0] if self.size else None def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def lowerCAmelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
65
1
"""simple docstring""" from typing import Any def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' _validation( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) # Creates data structures and fill initial step UpperCAmelCase__ : dict = {} UpperCAmelCase__ : dict = {} for state in states_space: UpperCAmelCase__ : Optional[Any] = observations_space[0] UpperCAmelCase__ : Optional[int] = ( initial_probabilities[state] * emission_probabilities[state][observation] ) UpperCAmelCase__ : Any = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__UpperCamelCase ) ): UpperCAmelCase__ : Tuple = observations_space[o] UpperCAmelCase__ : Optional[Any] = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function UpperCAmelCase__ : List[str] = """""" UpperCAmelCase__ : Tuple = -1 for k_state in states_space: UpperCAmelCase__ : List[str] = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: UpperCAmelCase__ : Any = probability UpperCAmelCase__ : Dict = k_state # Update probabilities and pointers dicts UpperCAmelCase__ : str = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) UpperCAmelCase__ : Union[str, Any] = arg_max # The final observation UpperCAmelCase__ : Any = observations_space[len(__UpperCamelCase ) - 1] # argmax for given final observation UpperCAmelCase__ : Optional[Any] = """""" UpperCAmelCase__ : Any = -1 for k_state in states_space: UpperCAmelCase__ : Any = probabilities[(k_state, final_observation)] if probability > max_probability: UpperCAmelCase__ : str = probability UpperCAmelCase__ : Union[str, Any] = 
k_state UpperCAmelCase__ : Dict = arg_max # Process pointers backwards UpperCAmelCase__ : int = last_state UpperCAmelCase__ : Optional[Any] = [] for o in range(len(__UpperCamelCase ) - 1 , -1 , -1 ): result.append(__UpperCamelCase ) UpperCAmelCase__ : Tuple = pointers[previous, observations_space[o]] result.reverse() return result def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' _validate_not_empty( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) _validate_lists(__UpperCamelCase , __UpperCamelCase ) _validate_dicts( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' _validate_list(__UpperCamelCase , """observations_space""" ) _validate_list(__UpperCamelCase , """states_space""" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if not isinstance(_object , __UpperCamelCase ): UpperCAmelCase__ : Any = F"{var_name} must be a list" raise ValueError(__UpperCamelCase ) else: for x in _object: if not isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCAmelCase__ : Any = F"{var_name} must be a list of strings" raise ValueError(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' _validate_dict(__UpperCamelCase , """initial_probabilities""" , __UpperCamelCase ) _validate_nested_dict(__UpperCamelCase , """transition_probabilities""" ) _validate_nested_dict(__UpperCamelCase , """emission_probabilities""" ) def lowerCAmelCase ( 
__UpperCamelCase , __UpperCamelCase ): '''simple docstring''' _validate_dict(_object , __UpperCamelCase , __UpperCamelCase ) for x in _object.values(): _validate_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ): '''simple docstring''' if not isinstance(_object , __UpperCamelCase ): UpperCAmelCase__ : Union[str, Any] = F"{var_name} must be a dict" raise ValueError(__UpperCamelCase ) if not all(isinstance(__UpperCamelCase , __UpperCamelCase ) for x in _object ): UpperCAmelCase__ : Optional[Any] = F"{var_name} all keys must be strings" raise ValueError(__UpperCamelCase ) if not all(isinstance(__UpperCamelCase , __UpperCamelCase ) for x in _object.values() ): UpperCAmelCase__ : Any = """nested dictionary """ if nested else """""" UpperCAmelCase__ : Optional[int] = F"{var_name} {nested_text}all values must be {value_type.__name__}" raise ValueError(__UpperCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
65
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """attention_mask"""] def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,): '''simple docstring''' super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A ) UpperCAmelCase__ : str = feature_size UpperCAmelCase__ : int = sampling_rate UpperCAmelCase__ : int = padding_value UpperCAmelCase__ : Dict = hop_length UpperCAmelCase__ : int = win_length UpperCAmelCase__ : Dict = frame_signal_scale UpperCAmelCase__ : Dict = preemphasis_coeff UpperCAmelCase__ : str = mel_floor UpperCAmelCase__ : Any = normalize_means UpperCAmelCase__ : str = normalize_vars UpperCAmelCase__ : int = win_function UpperCAmelCase__ : List[Any] = return_attention_mask UpperCAmelCase__ : str = win_length * sampling_rate // 1_000 UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000 UpperCAmelCase__ : int = optimal_fft_length(self.sample_size ) UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1 def __lowercase ( self : Union[str, Any] ,A : np.array ): '''simple docstring''' if self.win_function == "hamming_window": UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A ) else: UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ) UpperCAmelCase__ : Union[str, Any] = 
mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,) UpperCAmelCase__ : Optional[Any] = spectrogram( one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,) return msfc_features.T def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ): '''simple docstring''' # make sure we normalize float32 arrays if self.normalize_means: UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 ) UpperCAmelCase__ : Any = np.subtract(A ,A ) if self.normalize_vars: UpperCAmelCase__ : str = x[:input_length].std(axis=0 ) UpperCAmelCase__ : Optional[int] = np.divide(A ,A ) if input_length < x.shape[0]: UpperCAmelCase__ : int = padding_value # make sure array is in float32 UpperCAmelCase__ : str = x.astype(np.floataa ) return x def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ): '''simple docstring''' UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )] def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : Any = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [raw_speech] # extract fbank features UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech] # convert into correct format for padding UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} ) UpperCAmelCase__ : Optional[Any] = self.pad( A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,) # make sure list is in array format UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] ,A ): UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features] UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: 
UpperCAmelCase__ : Union[str, Any] = ( np.array(A ,dtype=np.intaa ) if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) UpperCAmelCase__ : Any = self.normalize( padded_inputs["""input_features"""] ,attention_mask=A ) if return_tensors is not None: UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A ) return padded_inputs
65
1
"""simple docstring""" class __lowercase : def __init__( self : List[Any] ,A : List[Any] ): '''simple docstring''' # we need a list not a string, so do something to change the type UpperCAmelCase__ : Any = arr.split(""",""" ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = [int(self.array[0] )] * len(self.array ) UpperCAmelCase__ : List[str] = [int(self.array[0] )] * len(self.array ) for i in range(1 ,len(self.array ) ): UpperCAmelCase__ : Optional[Any] = max( int(self.array[i] ) + sum_value[i - 1] ,int(self.array[i] ) ) UpperCAmelCase__ : Union[str, Any] = max(sum_value[i] ,rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": __UpperCAmelCase = input('please input some numbers:') __UpperCAmelCase = SubArray(whole_array) __UpperCAmelCase = array.solve_sub_array() print(('the results is:', re))
65
"""simple docstring""" from math import factorial def lowerCAmelCase ( __UpperCamelCase = 100 ): '''simple docstring''' return sum(int(__UpperCamelCase ) for x in str(factorial(__UpperCamelCase ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
65
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class __lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] ,A : Optional[int] ,A : int=13 ,A : Tuple=7 ,A : Dict=True ,A : Optional[int]=True ,A : Tuple=True ,A : str=True ,A : Any=99 ,A : Tuple=32 ,A : Dict=5 ,A : Optional[int]=4 ,A : Dict=37 ,A : Any="gelu" ,A : Any=0.1 ,A : Optional[int]=0.1 ,A : Union[str, Any]=512 ,A : Any=16 ,A : List[str]=2 ,A : List[Any]=0.0_2 ,A : Optional[int]=4 ,): '''simple docstring''' UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : Any = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : Optional[int] = is_training UpperCAmelCase__ : Optional[Any] = use_attention_mask UpperCAmelCase__ : int = use_token_type_ids UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Any = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Dict = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : Any = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : List[Any] = type_vocab_size UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = num_choices def __lowercase ( self : Optional[Any] ): '''simple docstring''' 
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase__ : List[str] = None if self.use_attention_mask: UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : int = DistilBertConfig( vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=A ,) return config, input_ids, attention_mask def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs UpperCAmelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = FlaxDistilBertModelTester(self ) @slow def __lowercase ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase__ : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(A ) @require_flax class __lowercase ( unittest.TestCase ): @slow def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 
FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase__ : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) UpperCAmelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCAmelCase__ : Dict = model(A ,attention_mask=A )[0] UpperCAmelCase__ : List[Any] = (1, 11, 768) self.assertEqual(output.shape ,A ) UpperCAmelCase__ : Any = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,A ,atol=1e-4 ) )
65
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class __lowercase ( unittest.TestCase ): @slow def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ,return_dict=A ).to(A ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" ) UpperCAmelCase__ : Union[str, Any] = tokenizer("""Hello there""" ,return_tensors="""pt""" ).input_ids UpperCAmelCase__ : Dict = tokenizer("""Hi I am""" ,return_tensors="""pt""" ).input_ids UpperCAmelCase__ : str = model(input_ids.to(A ) ,labels=labels.to(A ) ).loss UpperCAmelCase__ : Optional[Any] = -(labels.shape[-1] * loss.item()) UpperCAmelCase__ : Tuple = -8_4.9_1_2_7 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
65
"""simple docstring""" __UpperCAmelCase = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image', 'mask_image']) __UpperCAmelCase = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset(['input_tokens']) __UpperCAmelCase = frozenset(['input_tokens'])
65
1
"""simple docstring""" from collections.abc import Sequence def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : List[str] = 0.0 for coeff in reversed(__UpperCamelCase ): UpperCAmelCase__ : Optional[int] = result * x + coeff return result if __name__ == "__main__": __UpperCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0) __UpperCAmelCase = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
65
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __lowercase ( unittest.TestCase ): def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split() UpperCAmelCase__ : Tuple = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[Any] = { """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>""", } UpperCAmelCase__ : int = { """feature_size""": 1, """padding_value""": 0.0, """sampling_rate""": 16_000, """return_attention_mask""": False, """do_normalize""": True, } UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp() UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,A ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with 
open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) # load decoder from hub UpperCAmelCase__ : int = """hf-internal-testing/ngram-beam-search-decoder""" def __lowercase ( self : str ,**A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy() kwargs.update(A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : List[str] ,**A : Dict ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Any ,**A : List[Any] ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**A ) def __lowercase ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,A ) # decoder self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,) self.assertIsInstance(processor.decoder ,A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() 
,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha ,5.0 ) self.assertEqual(processor.language_model.beta ,3.0 ) self.assertEqual(processor.language_model.score_boundary ,-7.0 ) self.assertEqual(processor.language_model.unk_score_offset ,3 ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["""xx"""] ) with self.assertRaisesRegex(A ,"""include""" ): WavaVecaProcessorWithLM( tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : str = floats_list((3, 1_000) ) UpperCAmelCase__ : Optional[Any] = feature_extractor(A ,return_tensors="""np""" ) UpperCAmelCase__ : List[Any] = processor(A ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[int] = self.get_decoder() UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : List[Any] = """This is a test string""" UpperCAmelCase__ : int = 
processor(text=A ) UpperCAmelCase__ : Dict = tokenizer(A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def __lowercase ( self : Tuple ,A : List[Any]=(2, 10, 16) ,A : Dict=77 ): '''simple docstring''' np.random.seed(A ) return np.random.rand(*A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 ) UpperCAmelCase__ : Tuple = processor.decode(A ) UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(A )[0] self.assertEqual(decoded_decoder[0] ,decoded_processor.text ) self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text ) self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score ) @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] ) def __lowercase ( self : List[str] ,A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: UpperCAmelCase__ : List[str] = processor.batch_decode(A ) else: with get_context(A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(A ,A ) UpperCAmelCase__ : Optional[Any] = list(A ) with get_context("""fork""" ).Pool() as p: UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(A ,decoded_processor.text ) self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text ) self.assertListEqual(A ,decoded_processor.logit_score ) self.assertListEqual(A ,decoded_processor.lm_score ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Any = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : List[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,) UpperCAmelCase__ : List[str] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(A ) with get_context("""fork""" ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,) UpperCAmelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(A ,A ) self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,A ) self.assertTrue(np.array_equal(A 
,decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,A ,atol=1e-3 ) ) self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,A ,atol=1e-3 ) ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : Tuple = 2.0 UpperCAmelCase__ : str = 5.0 UpperCAmelCase__ : Union[str, Any] = -2_0.0 UpperCAmelCase__ : Optional[Any] = True UpperCAmelCase__ : str = processor.batch_decode( A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,) UpperCAmelCase__ : Any = decoded_processor_out.text UpperCAmelCase__ : Union[str, Any] = list(A ) decoder.reset_params( alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,) with get_context("""fork""" ).Pool() as pool: UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch( A ,A ,) UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(A ,A ) self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,A ) UpperCAmelCase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha ,2.0 ) self.assertEqual(lm_model.beta ,5.0 ) self.assertEqual(lm_model.unk_score_offset ,-2_0.0 ) self.assertEqual(lm_model.score_boundary ,A ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : str = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) 
).parent.parent.absolute() UpperCAmelCase__ : Optional[int] = os.listdir(A ) UpperCAmelCase__ : List[Any] = ["""alphabet.json""", """language_model"""] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(A ,A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(A ) UpperCAmelCase__ : Tuple = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() UpperCAmelCase__ : Tuple = os.listdir(A ) UpperCAmelCase__ : Dict = os.listdir(A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(A ,A ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Dict = floats_list((3, 1_000) ) UpperCAmelCase__ : List[str] = processor_wavaveca(A ,return_tensors="""np""" ) UpperCAmelCase__ : Dict = processor_auto(A ,return_tensors="""np""" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 ) UpperCAmelCase__ : List[str] = self._get_dummy_logits() UpperCAmelCase__ : Tuple = processor_wavaveca.batch_decode(A ) UpperCAmelCase__ : List[str] = processor_auto.batch_decode(A ) self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text ) def __lowercase ( self : List[str] ): 
'''simple docstring''' UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) self.assertListEqual( processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,) @staticmethod def __lowercase ( A : Optional[Any] ,A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets] return retrieved_list def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Dict = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(A ,output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : int = self._get_dummy_logits() UpperCAmelCase__ : Any = processor.batch_decode(A ,output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in 
outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) self.assertListEqual( [""" """.join(self.get_from_offsets(A ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] ) @slow @require_torch @require_torchaudio def __lowercase ( self : Tuple ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=A ) UpperCAmelCase__ : Tuple = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : Tuple = iter(A ) UpperCAmelCase__ : Optional[int] = next(A ) UpperCAmelCase__ : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) UpperCAmelCase__ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : Tuple = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(A ).logits.cpu().numpy() UpperCAmelCase__ : Any = processor.decode(logits[0] ,output_word_offsets=A ) UpperCAmelCase__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Union[str, Any] = [ { """start_time""": d["""start_offset"""] * time_offset, """end_time""": d["""end_offset"""] * time_offset, """word""": d["""word"""], } for d in output["""word_offsets"""] ] UpperCAmelCase__ : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON 
THE RIVER AT THE WALL""" # output words self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,A ) self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,output.text ) # output times UpperCAmelCase__ : str = torch.tensor(self.get_from_offsets(A ,"""start_time""" ) ) UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(A ,"""end_time""" ) ) # fmt: off UpperCAmelCase__ : Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) ) self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
65
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class __lowercase ( unittest.TestCase ): def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Any = tempfile.mkdtemp() # fmt: off UpperCAmelCase__ : str = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on UpperCAmelCase__ : Dict = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Tuple = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] UpperCAmelCase__ : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) UpperCAmelCase__ : Optional[int] = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } UpperCAmelCase__ : str = os.path.join(self.tmpdirname ,A ) with open(self.image_processor_file ,"""w""" 
,encoding="""utf-8""" ) as fp: json.dump(A ,A ) def __lowercase ( self : Any ,**A : List[str] ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Tuple ,**A : Union[str, Any] ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Tuple ,**A : Tuple ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : List[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] UpperCAmelCase__ : List[str] = [Image.fromarray(np.moveaxis(A ,0 ,-1 ) ) for x in image_inputs] return image_inputs def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase__ : List[str] = self.get_image_processor() UpperCAmelCase__ : Union[str, Any] = CLIPSegProcessor(tokenizer=A ,image_processor=A ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : List[str] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=A ) UpperCAmelCase__ : Tuple = CLIPSegProcessor(tokenizer=A ,image_processor=A ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : int = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,A ) self.assertIsInstance(processor_fast.tokenizer ,A ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) 
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,A ) self.assertIsInstance(processor_fast.image_processor ,A ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) UpperCAmelCase__ : Optional[int] = self.get_image_processor(do_normalize=A ,padding_value=1.0 ) UpperCAmelCase__ : Tuple = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=A ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,A ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,A ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_image_processor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : str = CLIPSegProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase__ : str = self.prepare_image_inputs() UpperCAmelCase__ : Union[str, Any] = image_processor(A ,return_tensors="""np""" ) UpperCAmelCase__ : int = processor(images=A ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_image_processor() UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : Any = CLIPSegProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase__ : Any = """lower newer""" UpperCAmelCase__ : 
Optional[Any] = processor(text=A ) UpperCAmelCase__ : Optional[Any] = tokenizer(A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_image_processor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Optional[int] = CLIPSegProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase__ : Union[str, Any] = """lower newer""" UpperCAmelCase__ : List[str] = self.prepare_image_inputs() UpperCAmelCase__ : Any = processor(text=A ,images=A ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(A ): processor() def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_image_processor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Dict = CLIPSegProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase__ : Optional[int] = self.prepare_image_inputs() UpperCAmelCase__ : List[Any] = self.prepare_image_inputs() UpperCAmelCase__ : str = processor(images=A ,visual_prompt=A ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """conditional_pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(A ): processor() def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_image_processor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = CLIPSegProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase__ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase__ : Tuple = processor.batch_decode(A ) UpperCAmelCase__ : Union[str, Any] = tokenizer.batch_decode(A ) self.assertListEqual(A ,A )
65
"""simple docstring""" from sklearn.metrics import fa_score import datasets __UpperCAmelCase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' __UpperCAmelCase = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' __UpperCAmelCase = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def __lowercase ( self : List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,) def __lowercase ( self : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Optional[Any]=None ,A : List[str]=1 ,A : Optional[Any]="binary" ,A : Any=None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = fa_score( A ,A ,labels=A ,pos_label=A ,average=A ,sample_weight=A ) return {"f1": float(A ) if score.size == 1 else score}
65
1
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) __UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __lowercase : snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Model type selected in the list: """ + """, """.join(__lowerCamelCase )} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} ) snake_case_ = field( default=1_2_8 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) snake_case_ = field( default=1_2_8 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , ) snake_case_ = field( default=6_4 , metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } , ) snake_case_ = field( default=3_0 , metadata={ """help""": ( """The maximum length of an answer that can be generated. 
This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } , ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) snake_case_ = field( default=__lowerCamelCase , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} ) snake_case_ = field( default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) snake_case_ = field( default=2_0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) snake_case_ = field( default=0 , metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } , ) snake_case_ = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} ) class __lowercase ( __lowerCamelCase ): snake_case_ = """train""" snake_case_ = """dev""" class __lowercase ( __lowerCamelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self : List[str] ,A : SquadDataTrainingArguments ,A : PreTrainedTokenizer ,A : Optional[int] = None ,A : Union[str, Split] = Split.train ,A : Optional[bool] = False ,A : Optional[str] = None ,A : Optional[str] = "pt" ,): '''simple docstring''' UpperCAmelCase__ : Dict = args UpperCAmelCase__ : Optional[Any] = is_language_sensitive UpperCAmelCase__ : str = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(A ,A ): try: UpperCAmelCase__ : int = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) UpperCAmelCase__ : List[Any] = mode # Load data features from cache or dataset file UpperCAmelCase__ : str = """v2""" if args.version_2_with_negative else """v1""" UpperCAmelCase__ : List[str] = os.path.join( cache_dir if cache_dir is not None else 
args.data_dir ,f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" ,) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. UpperCAmelCase__ : int = cached_features_file + """.lock""" with FileLock(A ): if os.path.exists(A ) and not args.overwrite_cache: UpperCAmelCase__ : Optional[Any] = time.time() UpperCAmelCase__ : List[Any] = torch.load(A ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. UpperCAmelCase__ : Any = self.old_features["""features"""] UpperCAmelCase__ : Tuple = self.old_features.get("""dataset""" ,A ) UpperCAmelCase__ : Tuple = self.old_features.get("""examples""" ,A ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" ,time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" """ future run""" ) else: if mode == Split.dev: UpperCAmelCase__ : Tuple = self.processor.get_dev_examples(args.data_dir ) else: UpperCAmelCase__ : Dict = self.processor.get_train_examples(args.data_dir ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = squad_convert_examples_to_features( examples=self.examples ,tokenizer=A ,max_seq_length=args.max_seq_length ,doc_stride=args.doc_stride ,max_query_length=args.max_query_length ,is_training=mode == Split.train ,threads=args.threads ,return_dataset=A ,) UpperCAmelCase__ : str = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} ,A ,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : str ,A : List[Any] ): '''simple docstring''' # Convert to Tensors and build dataset UpperCAmelCase__ : Optional[Any] = self.features[i] UpperCAmelCase__ : Any = torch.tensor(feature.input_ids ,dtype=torch.long ) UpperCAmelCase__ : int = torch.tensor(feature.attention_mask ,dtype=torch.long ) UpperCAmelCase__ : Tuple = torch.tensor(feature.token_type_ids ,dtype=torch.long ) UpperCAmelCase__ : List[Any] = torch.tensor(feature.cls_index ,dtype=torch.long ) UpperCAmelCase__ : Optional[int] = torch.tensor(feature.p_mask ,dtype=torch.float ) UpperCAmelCase__ : Optional[int] = torch.tensor(feature.is_impossible ,dtype=torch.float ) UpperCAmelCase__ : List[str] = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape ,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: UpperCAmelCase__ : Union[str, Any] = torch.tensor(feature.start_position ,dtype=torch.long ) UpperCAmelCase__ : Dict = torch.tensor(feature.end_position ,dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
65
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model') __UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'} __UpperCAmelCase = '>>zh<<' __UpperCAmelCase = 'Helsinki-NLP/' if is_torch_available(): __UpperCAmelCase = 'pt' elif is_tf_available(): __UpperCAmelCase = 'tf' else: __UpperCAmelCase = 'jax' @require_sentencepiece class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = MarianTokenizer snake_case_ = False snake_case_ = True def __lowercase ( self : Optional[int] ): '''simple docstring''' super().setUp() UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase ( self : List[Any] ,**A : List[Any] ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Union[str, Any] ,A : Tuple ): '''simple docstring''' return ( 
"This is a test", "This is a test", ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = """</s>""" UpperCAmelCase__ : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""</s>""" ) self.assertEqual(vocab_keys[1] ,"""<unk>""" ) self.assertEqual(vocab_keys[-1] ,"""<pad>""" ) self.assertEqual(len(A ) ,9 ) def __lowercase ( self : Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,9 ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" ) UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A ) self.assertIsInstance(A ,A ) UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(A ,batch.input_ids[0] ) UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(A ) UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )] self.assertIn("""source.spm""" ,A ) MarianTokenizer.from_pretrained(A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] ,padding=A ,truncation=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch.input_ids.shape ,(2, 512) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) ) @slow def 
__lowercase ( self : Dict ): '''simple docstring''' # fmt: off UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) UpperCAmelCase__ : Any = """Tämä on testi""" UpperCAmelCase__ : int = """This is a test""" UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2] UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2] UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : int = 
tokenizer.decode(A ,skip_special_tokens=A ) self.assertEqual(A ,A )
65
1
"""Train a small LSTM on a univariate series (e.g. stock prices) and predict
``forward_days`` steps ahead from sliding windows of ``look_back`` points.

Expects ``sample_data.csv`` in the working directory, no header, with the
target series in the second column.

Fixes over the original text: every assignment target had been mangled to
``__UpperCAmelCase`` while later statements referenced ``df``, ``len_data``,
``train_data`` etc., so the script raised NameError; the intended names are
restored from those uses.  ``df.shape[:1][0]`` is simplified to
``df.shape[0]`` and the bogus ``input_shape=(128, 1)`` on the second LSTM
layer (ignored by Keras on non-first layers, and wrong anyway) is dropped.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[0]  # number of rows in the series
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    # Scale into [0, 1] so the LSTM trains stably.
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10    # window length fed to the network
    forward_days = 5  # prediction horizon per window
    periods = 20
    # Hold out the last `periods * look_back` observations for testing.
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    # Overlap by `look_back` so the first test window has full history.
    test_data = actual_data[division - look_back :]

    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    # Flatten each target window to a vector of length `forward_days`.
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    # Keras infers the input shape of non-first layers from the previous
    # layer's output, so no input_shape is given here.
    model.add(LSTM(64))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    predictions = model.predict(x_test)
65
"""Dummy placeholder raising a helpful error when the ``onnx`` backend is missing."""
from ..utils import DummyObject, requires_backends


class __lowercase(metaclass=DummyObject):
    """Stand-in for an ONNX-backed class.

    Any attempt to instantiate it (or call one of its constructors) raises an
    ImportError via ``requires_backends`` explaining that ``onnx`` must be
    installed.  Fixes over the original text: the metaclass referenced an
    undefined ``__lowerCamelCase`` (``DummyObject`` was imported but unused),
    and every method declared ``*A, **A`` — a duplicate parameter name, which
    is a SyntaxError — now ``*args, **kwargs``.
    """

    # Backends this dummy stands in for.
    snake_case_ = ["""onnx"""]

    def __init__(self, *args, **kwargs):
        # Report the missing backend immediately on instantiation.
        requires_backends(self, ["""onnx"""])

    @classmethod
    def __lowercase(cls, *args, **kwargs):
        # Stand-in for an alternate constructor (e.g. ``from_config``).
        requires_backends(cls, ["""onnx"""])

    @classmethod
    def __lowercase(cls, *args, **kwargs):  # noqa: F811 — name collision introduced by the renaming
        # Stand-in for a second alternate constructor (e.g. ``from_pretrained``).
        requires_backends(cls, ["""onnx"""])
65
1
"""Tests for the ConvNeXt-V2 model family: base model, image-classification
head, and backbone.

NOTE(review): this chunk was mechanically renamed (every class is
``__lowercase``, every parameter is ``A``, every local target is
``UpperCAmelCase__``).  That leaves duplicate parameter names (a SyntaxError)
and references to names that are never bound here (``parent``, ``config``,
``model``, ``ConvNextVaModelTester``, ``__lowerCamelCase`` — presumably
``ModelTesterMixin``/``PipelineTesterMixin``; confirm against the upstream
file).  Tokens are reproduced unchanged; only comments/docstrings were added.
"""
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class __lowercase:
    """Builds tiny ConvNeXt-V2 configs and random inputs for the tests below."""

    # NOTE(review): duplicate `A` parameters are a SyntaxError, and the body
    # reads `parent`, `batch_size`, ... which are never bound — obfuscation damage.
    def __init__(self: List[str], A: List[Any], A: List[str] = 13, A: Any = 32, A: List[str] = 3, A: Optional[int] = 4, A: Optional[int] = [10, 20, 30, 40], A: str = [2, 2, 3, 2], A: Optional[Any] = True, A: Dict = True, A: Tuple = 37, A: List[str] = "gelu", A: Optional[int] = 10, A: List[Any] = 0.0_2, A: Optional[int] = ["stage2", "stage3", "stage4"], A: List[Any] = [2, 3, 4], A: List[Any] = None,):
        """Record the tester hyper-parameters (batch size, image size, depths, ...)."""
        UpperCAmelCase__: List[Any] = parent
        UpperCAmelCase__: str = batch_size
        UpperCAmelCase__: Union[str, Any] = image_size
        UpperCAmelCase__: Any = num_channels
        UpperCAmelCase__: Optional[int] = num_stages
        UpperCAmelCase__: str = hidden_sizes
        UpperCAmelCase__: List[Any] = depths
        UpperCAmelCase__: str = is_training
        UpperCAmelCase__: Dict = use_labels
        UpperCAmelCase__: List[str] = intermediate_size
        UpperCAmelCase__: List[Any] = hidden_act
        UpperCAmelCase__: Optional[Any] = num_labels
        UpperCAmelCase__: Union[str, Any] = initializer_range
        UpperCAmelCase__: List[Any] = out_features
        UpperCAmelCase__: Optional[Any] = out_indices
        UpperCAmelCase__: Any = scope

    def __lowercase(self: List[Any]):
        """Create a config plus random pixel_values (and labels when enabled)."""
        UpperCAmelCase__: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        UpperCAmelCase__: Tuple = None
        if self.use_labels:
            UpperCAmelCase__: List[str] = ids_tensor([self.batch_size], self.num_labels)
        UpperCAmelCase__: Union[str, Any] = self.get_config()
        return config, pixel_values, labels

    def __lowercase(self: int):
        """Return a small ConvNextVaConfig built from the tester attributes."""
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=A, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels,)

    def __lowercase(self: str, A: List[Any], A: Union[str, Any], A: Optional[Any]):
        """Run the bare model and check the last hidden state shape (H,W // 32)."""
        UpperCAmelCase__: Union[str, Any] = ConvNextVaModel(config=A)
        model.to(A)
        model.eval()
        UpperCAmelCase__: Union[str, Any] = model(A)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),)

    def __lowercase(self: Union[str, Any], A: Union[str, Any], A: Optional[Any], A: List[str]):
        """Run the classification head and check the logits shape."""
        UpperCAmelCase__: Dict = ConvNextVaForImageClassification(A)
        model.to(A)
        model.eval()
        UpperCAmelCase__: Optional[int] = model(A, labels=A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def __lowercase(self: int, A: Optional[int], A: Optional[int], A: Union[str, Any]):
        """Check the backbone's feature maps/channels, with and without out_features."""
        UpperCAmelCase__: int = ConvNextVaBackbone(config=A)
        model.to(A)
        model.eval()
        UpperCAmelCase__: Tuple = model(A)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        UpperCAmelCase__: List[Any] = None
        UpperCAmelCase__: str = ConvNextVaBackbone(config=A)
        model.to(A)
        model.eval()
        UpperCAmelCase__: str = model(A)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def __lowercase(self: str):
        """Return (config, inputs_dict) with pixel_values only."""
        UpperCAmelCase__: Dict = self.prepare_config_and_inputs()
        UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__: Union[str, Any] = config_and_inputs
        UpperCAmelCase__: List[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict

    def __lowercase(self: str):
        """Return (config, inputs_dict) including labels, for training tests."""
        UpperCAmelCase__: Any = self.prepare_config_and_inputs()
        UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__: int = config_and_inputs
        UpperCAmelCase__: Dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict


@require_torch
class __lowercase(__lowerCamelCase, __lowerCamelCase, unittest.TestCase):
    """Common model-tester suite for ConvNeXt-V2 (no attention, no embeddings)."""

    # all_model_classes / pipeline mapping / feature flags (names obfuscated).
    snake_case_ = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    snake_case_ = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    def __lowercase(self: Tuple):
        """Set up the model tester and config tester helpers."""
        # NOTE(review): ConvNextVaModelTester is undefined in this chunk — the
        # tester class above was renamed to __lowercase; confirm upstream.
        UpperCAmelCase__: Tuple = ConvNextVaModelTester(self)
        UpperCAmelCase__: Any = ConfigTester(self, config_class=A, has_text_modality=A, hidden_size=37)

    def __lowercase(self: List[str]):
        """Exercise the standard ConfigTester battery."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __lowercase(self: List[str]):
        """Intentional no-op (common-properties hook)."""
        return

    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""")
    def __lowercase(self: Union[str, Any]):
        """Skipped: model is convolutional, no inputs_embeds."""
        pass

    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""")
    def __lowercase(self: Optional[Any]):
        """Skipped: no token embeddings to get/set."""
        pass

    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""")
    def __lowercase(self: str):
        """Skipped: no feed-forward chunking."""
        pass

    def __lowercase(self: List[Any]):
        """Train one step per model class and check the loss backpropagates."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            UpperCAmelCase__, UpperCAmelCase__: int = self.model_tester.prepare_config_and_inputs_with_labels()
            UpperCAmelCase__: int = True
            if model_class.__name__ in [
                *get_values(A),
                *get_values(A),
            ]:
                continue
            UpperCAmelCase__: Tuple = model_class(A)
            model.to(A)
            model.train()
            UpperCAmelCase__: List[Any] = self._prepare_for_class(A, A, return_labels=A)
            UpperCAmelCase__: Optional[int] = model(**A).loss
            loss.backward()

    def __lowercase(self: Tuple):
        """Same as the training test but with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            UpperCAmelCase__, UpperCAmelCase__: str = self.model_tester.prepare_config_and_inputs_with_labels()
            UpperCAmelCase__: int = False
            UpperCAmelCase__: List[Any] = True
            if (
                model_class.__name__ in [*get_values(A), *get_values(A)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            UpperCAmelCase__: Dict = model_class(A)
            model.to(A)
            model.gradient_checkpointing_enable()
            model.train()
            UpperCAmelCase__: Tuple = self._prepare_for_class(A, A, return_labels=A)
            UpperCAmelCase__: Optional[Any] = model(**A).loss
            loss.backward()

    def __lowercase(self: List[str]):
        """Check forward() takes pixel_values as its first argument."""
        UpperCAmelCase__, UpperCAmelCase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__: List[Any] = model_class(A)
            UpperCAmelCase__: Any = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase__: Optional[Any] = [*signature.parameters.keys()]
            UpperCAmelCase__: Tuple = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], A)

    def __lowercase(self: Dict):
        """Delegate to the tester's bare-model check."""
        UpperCAmelCase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A)

    def __lowercase(self: Any):
        """Check hidden-state count and spatial shape, via kwarg and via config."""

        def check_hidden_states_output(A: Optional[Any], A: Union[str, Any], A: str):
            UpperCAmelCase__: List[str] = model_class(A)
            model.to(A)
            model.eval()
            with torch.no_grad():
                UpperCAmelCase__: int = model(**self._prepare_for_class(A, A))
            UpperCAmelCase__: str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase__: List[str] = self.model_tester.num_stages
            self.assertEqual(len(A), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],)

        UpperCAmelCase__, UpperCAmelCase__: Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__: Dict = True
            check_hidden_states_output(A, A, A)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase__: Tuple = True
            check_hidden_states_output(A, A, A)

    def __lowercase(self: int):
        """Delegate to the tester's classification-head check."""
        UpperCAmelCase__: str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A)

    @slow
    def __lowercase(self: Optional[Any]):
        """Smoke-test loading the first published checkpoint."""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__: Tuple = ConvNextVaModel.from_pretrained(A)
            self.assertIsNotNone(A)


def lowerCAmelCase():
    """Load the standard COCO cats fixture image used by the integration test."""
    UpperCAmelCase__: List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image


@require_torch
@require_vision
class __lowercase(unittest.TestCase):
    """Integration test against the published convnextv2-tiny checkpoint."""

    @cached_property
    def __lowercase(self: int):
        """Image processor for the checkpoint, or None without vision deps."""
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""") if is_vision_available() else None

    @slow
    def __lowercase(self: Optional[Any]):
        """Run a real forward pass and pin the first three logits."""
        UpperCAmelCase__: int = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""").to(A)
        UpperCAmelCase__: Any = self.default_image_processor
        UpperCAmelCase__: str = prepare_img()
        UpperCAmelCase__: List[Any] = preprocessor(images=A, return_tensors="""pt""").to(A)
        # forward pass
        with torch.no_grad():
            UpperCAmelCase__: Optional[int] = model(**A)
        # verify the logits
        UpperCAmelCase__: List[Any] = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, A)
        UpperCAmelCase__: Optional[Any] = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6]).to(A)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1e-4))
65
"""Command-line entry point for the ``diffusers-cli`` tool."""
from argparse import ArgumentParser

from .env import EnvironmentCommand


def lowerCAmelCase():
    """Build the CLI parser, register subcommands, and dispatch.

    Prints the help text and exits with status 1 when no subcommand (i.e. no
    ``func`` attribute on the parsed args) was supplied; otherwise constructs
    the selected command from the parsed args and runs it.
    """
    parser = ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


# The __main__ guard below called an undefined ``main`` (the function above
# was renamed); this alias fixes it while staying backward compatible for any
# caller importing ``main``.
main = lowerCAmelCase

if __name__ == "__main__":
    main()
65
1
"""Convert a non-negative decimal integer to its representation in any base 2-36."""
from string import ascii_uppercase

# Maps "10" -> "A", "11" -> "B", ..., "35" -> "Z" for digit values above 9.
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
# Preserve the module-level name used by the original text.
__UpperCAmelCase = ALPHABET_VALUES


def lowerCAmelCase(num, base):
    """Return ``num`` written in ``base`` as a string of digits/letters.

    Args:
        num: non-negative int to convert.
        base: target base, 2..36 inclusive.

    Raises:
        TypeError: if ``num`` is a float, or ``base`` is a str or float.
        ValueError: if ``num`` is negative or ``base`` is outside 2..36.

    Fixes over the original text: both parameters shared one name (a
    SyntaxError), the type checks read ``isinstance(x, x)`` (always a
    TypeError at runtime), and the digit table was referenced under an
    unbound name.
    """
    if isinstance(num, float):
        raise TypeError("""int() can't convert non-string with explicit base""")
    if num < 0:
        raise ValueError("""parameter must be positive int""")
    if isinstance(base, str):
        raise TypeError("""'str' object cannot be interpreted as an integer""")
    if isinstance(base, float):
        raise TypeError("""'float' object cannot be interpreted as an integer""")
    if base in (0, 1):
        raise ValueError("""base must be >= 2""")
    if base > 36:
        raise ValueError("""base must be <= 36""")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # Digit values 10..35 map to the letters A..Z.
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        # Digits are produced least-significant first; reversed on return.
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]


# Backward-compatible alias: the self-test below (and the original upstream
# name) refer to ``decimal_to_any``.
decimal_to_any = lowerCAmelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Round-trip check: converting to base b and parsing back must be identity.
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
65
"""Tests for the Flax Pegasus models (encoder/decoder, caching, JIT, generation).

NOTE(review): this chunk was mechanically renamed (classes -> ``__lowercase``,
parameters -> ``A``, local targets -> ``UpperCAmelCase__``), leaving duplicate
parameter names (a SyntaxError) and unbound references (``parent``,
``input_ids``, ``FlaxPegasusModelTester``, ``__lowerCamelCase`` — presumably
``FlaxModelTesterMixin``; confirm upstream).  Tokens are reproduced unchanged;
only comments/docstrings were added.
"""
import unittest

from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): originally this set os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"];
    # the target was obfuscated away — confirm upstream.
    __UpperCAmelCase = 'platform'
    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel


@require_flax
class __lowercase:
    """Builds tiny Pegasus configs/inputs and shared cache-consistency checks."""

    # config class / config overrides / hidden activation (names obfuscated).
    snake_case_ = PegasusConfig
    snake_case_ = {}
    snake_case_ = """gelu"""

    # NOTE(review): duplicate `A` parameters are a SyntaxError; the body reads
    # `parent`, `batch_size`, ... which are never bound — obfuscation damage.
    def __init__(self: List[Any], A: int, A: Optional[Any] = 13, A: Dict = 7, A: Dict = True, A: Any = False, A: Dict = 99, A: int = 32, A: Optional[int] = 5, A: Union[str, Any] = 4, A: Union[str, Any] = 37, A: str = 0.1, A: int = 0.1, A: Optional[int] = 20, A: Tuple = 2, A: str = 1, A: Optional[Any] = 0,):
        """Record the tester hyper-parameters (batch size, vocab, layers, ...)."""
        UpperCAmelCase__: Optional[Any] = parent
        UpperCAmelCase__: Union[str, Any] = batch_size
        UpperCAmelCase__: List[Any] = seq_length
        UpperCAmelCase__: int = is_training
        UpperCAmelCase__: Any = use_labels
        UpperCAmelCase__: int = vocab_size
        UpperCAmelCase__: Dict = hidden_size
        UpperCAmelCase__: Optional[Any] = num_hidden_layers
        UpperCAmelCase__: int = num_attention_heads
        UpperCAmelCase__: Any = intermediate_size
        UpperCAmelCase__: Optional[int] = hidden_dropout_prob
        UpperCAmelCase__: str = attention_probs_dropout_prob
        UpperCAmelCase__: str = max_position_embeddings
        UpperCAmelCase__: Union[str, Any] = eos_token_id
        UpperCAmelCase__: Union[str, Any] = pad_token_id
        UpperCAmelCase__: List[str] = bos_token_id

    def __lowercase(self: Dict):
        """Create a config and an inputs dict with EOS-terminated input_ids."""
        UpperCAmelCase__: List[Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        UpperCAmelCase__: List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        UpperCAmelCase__: Any = np.concatenate([input_ids, eos_tensor], axis=1)
        UpperCAmelCase__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        UpperCAmelCase__: str = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,)
        UpperCAmelCase__: Optional[Any] = prepare_pegasus_inputs_dict(A, A, A)
        return config, inputs_dict

    def __lowercase(self: Any, A: Optional[int], A: str, A: Optional[int]):
        """Check incremental decoding with init_cache matches full decoding."""
        UpperCAmelCase__: Any = 20
        UpperCAmelCase__: Dict = model_class_name(A)
        UpperCAmelCase__: str = model.encode(inputs_dict["""input_ids"""])
        UpperCAmelCase__, UpperCAmelCase__: List[str] = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        UpperCAmelCase__: Union[str, Any] = model.init_cache(decoder_input_ids.shape[0], A, A)
        UpperCAmelCase__: Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="""i4""")
        UpperCAmelCase__: str = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),)
        # Decode all but the last token using the cache ...
        UpperCAmelCase__: Optional[int] = model.decode(
            decoder_input_ids[:, :-1], A, decoder_attention_mask=A, past_key_values=A, decoder_position_ids=A,)
        UpperCAmelCase__: Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""")
        # ... then the final token incrementally, and compare with one-shot decode.
        UpperCAmelCase__: int = model.decode(
            decoder_input_ids[:, -1:], A, decoder_attention_mask=A, past_key_values=outputs_cache.past_key_values, decoder_position_ids=A,)
        UpperCAmelCase__: Dict = model.decode(A, A)
        UpperCAmelCase__: str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def __lowercase(self: Optional[int], A: str, A: Dict, A: Union[str, Any]):
        """Same cache-consistency check, but with an explicit attention mask."""
        UpperCAmelCase__: Any = 20
        UpperCAmelCase__: str = model_class_name(A)
        UpperCAmelCase__: Any = model.encode(inputs_dict["""input_ids"""])
        UpperCAmelCase__, UpperCAmelCase__: Optional[int] = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        # Pad the decoder mask with zeros out to max_decoder_length.
        UpperCAmelCase__: Optional[int] = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1,)
        UpperCAmelCase__: Union[str, Any] = model.init_cache(decoder_input_ids.shape[0], A, A)
        UpperCAmelCase__: List[str] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),)
        UpperCAmelCase__: Union[str, Any] = model.decode(
            decoder_input_ids[:, :-1], A, decoder_attention_mask=A, past_key_values=A, decoder_position_ids=A,)
        UpperCAmelCase__: int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""")
        UpperCAmelCase__: Dict = model.decode(
            decoder_input_ids[:, -1:], A, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=A, decoder_position_ids=A,)
        UpperCAmelCase__: Union[str, Any] = model.decode(A, A, decoder_attention_mask=A)
        UpperCAmelCase__: Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


# NOTE(review): both positional parameters share one name (SyntaxError) and the
# body reads `attention_mask`/`decoder_input_ids` etc. — obfuscation damage.
def lowerCAmelCase(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase=None, __UpperCamelCase=None,):
    """Build the Pegasus inputs dict, deriving masks from pad tokens when absent."""
    if attention_mask is None:
        UpperCAmelCase__: Union[str, Any] = np.not_equal(__UpperCamelCase, config.pad_token_id).astype(np.inta)
    if decoder_attention_mask is None:
        # First decoder position is always attended; the rest mask pad tokens.
        UpperCAmelCase__: Tuple = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.inta),
            ], axis=-1,)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class __lowercase(__lowerCamelCase, unittest.TestCase):
    """Flax common-suite tests plus slow integration tests for Pegasus."""

    snake_case_ = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    snake_case_ = True
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    def __lowercase(self: List[str]):
        """Set up the model tester and config tester helpers."""
        # NOTE(review): FlaxPegasusModelTester is undefined in this chunk — the
        # tester class above was renamed to __lowercase; confirm upstream.
        UpperCAmelCase__: int = FlaxPegasusModelTester(self)
        UpperCAmelCase__: Optional[Any] = ConfigTester(self, config_class=A)

    def __lowercase(self: Tuple):
        """Run the standard config tests."""
        self.config_tester.run_common_tests()

    def __lowercase(self: List[str]):
        """Cache-consistency check for every model class."""
        UpperCAmelCase__, UpperCAmelCase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(A, A, A)

    def __lowercase(self: List[str]):
        """Cache-consistency check (with attention mask) for every model class."""
        UpperCAmelCase__, UpperCAmelCase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(A, A, A)

    def __lowercase(self: Any):
        """Check encode() gives shape-identical outputs with and without JIT."""
        UpperCAmelCase__, UpperCAmelCase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                UpperCAmelCase__: List[Any] = self._prepare_for_class(A, A)
                UpperCAmelCase__: int = model_class(A)

                @jax.jit
                def encode_jitted(A: Optional[int], A: Union[str, Any] = None, **A: Optional[Any]):
                    return model.encode(input_ids=A, attention_mask=A)

                with self.subTest("""JIT Enabled"""):
                    UpperCAmelCase__: int = encode_jitted(**A).to_tuple()
                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        UpperCAmelCase__: Dict = encode_jitted(**A).to_tuple()
                self.assertEqual(len(A), len(A))
                for jitted_output, output in zip(A, A):
                    self.assertEqual(jitted_output.shape, output.shape)

    def __lowercase(self: str):
        """Check decode() gives shape-identical outputs with and without JIT."""
        UpperCAmelCase__, UpperCAmelCase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                UpperCAmelCase__: Dict = model_class(A)
                UpperCAmelCase__: str = model.encode(inputs_dict["""input_ids"""], inputs_dict["""attention_mask"""])
                UpperCAmelCase__: Dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(A: List[Any], A: Any, A: List[Any]):
                    return model.decode(
                        decoder_input_ids=A, decoder_attention_mask=A, encoder_outputs=A,)

                with self.subTest("""JIT Enabled"""):
                    UpperCAmelCase__: Tuple = decode_jitted(**A).to_tuple()
                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        UpperCAmelCase__: str = decode_jitted(**A).to_tuple()
                self.assertEqual(len(A), len(A))
                for jitted_output, output in zip(A, A):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def __lowercase(self: List[Any]):
        """Smoke-test loading pegasus-large (converted from PyTorch) per class."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__: List[str] = model_class_name.from_pretrained("""google/pegasus-large""", from_pt=A)
            UpperCAmelCase__: Any = np.ones((1, 1))
            UpperCAmelCase__: Optional[Any] = model(A)
            self.assertIsNotNone(A)

    @slow
    def __lowercase(self: Optional[int]):
        """End-to-end summarization with pegasus-xsum; pin the exact summaries."""
        UpperCAmelCase__: Dict = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""")
        UpperCAmelCase__: Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""")
        UpperCAmelCase__: Union[str, Any] = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]
        UpperCAmelCase__: str = [
            """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]
        UpperCAmelCase__: str = tokenizer(A, return_tensors="""np""", truncation=A, max_length=512, padding=A)
        UpperCAmelCase__: Union[str, Any] = model.generate(**A, num_beams=2).sequences
        UpperCAmelCase__: int = tokenizer.batch_decode(A, skip_special_tokens=A)
        assert tgt_text == decoded
65
1
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self : List[Any] ,A : Optional[Any] ,A : List[Any]=7 ,A : List[Any]=3 ,A : Optional[Any]=30 ,A : List[str]=400 ,A : Any=True ,A : str=None ,A : str=True ,A : int=[0.5, 0.5, 0.5] ,A : List[Any]=[0.5, 0.5, 0.5] ,A : Any=True ,A : Dict=1 / 255 ,A : str=True ,): '''simple docstring''' # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCAmelCase__ : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333} UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Optional[int] = num_channels UpperCAmelCase__ : Optional[int] = min_resolution UpperCAmelCase__ : Union[str, Any] = max_resolution UpperCAmelCase__ : Optional[Any] = do_resize UpperCAmelCase__ : str = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : int = image_mean UpperCAmelCase__ : Dict = image_std UpperCAmelCase__ : str = do_rescale UpperCAmelCase__ : List[Any] = rescale_factor UpperCAmelCase__ : Union[str, Any] = do_pad def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __lowercase ( self : str ,A : int ,A : Optional[int]=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : List[Any] = 
image_inputs[0] if isinstance(A ,Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Tuple = int(self.size["""shortest_edge"""] * h / w ) UpperCAmelCase__ : Any = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase__ : Tuple = self.size["""shortest_edge"""] UpperCAmelCase__ : Optional[Any] = int(self.size["""shortest_edge"""] * w / h ) else: UpperCAmelCase__ : Optional[int] = self.size["""shortest_edge"""] UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""] else: UpperCAmelCase__ : Any = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Dict = max(A ,key=lambda A : item[0] )[0] UpperCAmelCase__ : List[Any] = max(A ,key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = DeformableDetrImageProcessor if is_vision_available() else None def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = DeformableDetrImageProcessingTester(self ) @property def __lowercase ( self : str ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""image_mean""" ) ) self.assertTrue(hasattr(A ,"""image_std""" ) ) self.assertTrue(hasattr(A ,"""do_normalize""" ) ) self.assertTrue(hasattr(A ,"""do_resize""" ) ) self.assertTrue(hasattr(A ,"""do_rescale""" ) ) self.assertTrue(hasattr(A ,"""do_pad""" ) ) self.assertTrue(hasattr(A ,"""size""" ) ) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : 
Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18, """longest_edge""": 1_333} ) self.assertEqual(image_processor.do_pad ,A ) UpperCAmelCase__ : int = self.image_processing_class.from_dict( self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad ,A ) def __lowercase ( self : List[Any] ): '''simple docstring''' pass def __lowercase ( self : Dict ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(A ,batched=A ) UpperCAmelCase__ : Union[str, Any] = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def __lowercase ( self : Dict ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A 
,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input UpperCAmelCase__ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase__ : Dict = image_processing(A ,return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(A ,batched=A ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def __lowercase ( self : int ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase__ : Optional[Any] = image_processing(A ,return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(A ,batched=A ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def __lowercase ( self : Dict ): '''simple docstring''' # prepare image 
and target UpperCAmelCase__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" ,"""r""" ) as f: UpperCAmelCase__ : List[Any] = json.loads(f.read() ) UpperCAmelCase__ : Union[str, Any] = {"""image_id""": 39_769, """annotations""": target} # encode them UpperCAmelCase__ : str = DeformableDetrImageProcessor() UpperCAmelCase__ : Union[str, Any] = image_processing(images=A ,annotations=A ,return_tensors="""pt""" ) # verify pixel values UpperCAmelCase__ : int = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape ,A ) UpperCAmelCase__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,A ,atol=1e-4 ) ) # verify area UpperCAmelCase__ : int = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,A ) ) # verify boxes UpperCAmelCase__ : Any = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,A ) UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,A ,atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Dict = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,A ) ) # verify is_crowd UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,A ) ) # verify class_labels UpperCAmelCase__ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,A ) ) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640] ) 
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,A ) ) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,A ) ) @slow def __lowercase ( self : str ): '''simple docstring''' # prepare image, target and masks_path UpperCAmelCase__ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" ,"""r""" ) as f: UpperCAmelCase__ : Optional[Any] = json.loads(f.read() ) UpperCAmelCase__ : str = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target} UpperCAmelCase__ : Optional[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them UpperCAmelCase__ : Union[str, Any] = DeformableDetrImageProcessor(format="""coco_panoptic""" ) UpperCAmelCase__ : Tuple = image_processing(images=A ,annotations=A ,masks_path=A ,return_tensors="""pt""" ) # verify pixel values UpperCAmelCase__ : Dict = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape ,A ) UpperCAmelCase__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,A ,atol=1e-4 ) ) # verify area UpperCAmelCase__ : List[str] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,A ) ) # verify boxes UpperCAmelCase__ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,A ) UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,A ,atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Tuple = torch.tensor([39_769] ) 
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,A ) ) # verify is_crowd UpperCAmelCase__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,A ) ) # verify class_labels UpperCAmelCase__ : str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,A ) ) # verify masks UpperCAmelCase__ : Any = 822_873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() ,A ) # verify orig_size UpperCAmelCase__ : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,A ) ) # verify size UpperCAmelCase__ : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,A ) )
65
"""simple docstring"""


def lowerCAmelCase(__UpperCamelCase):
    """Return the number of set bits (population count) of a non-negative integer.

    Uses Brian Kernighan's trick: ``n &= n - 1`` clears the lowest set bit, so
    the loop runs once per set bit instead of once per bit position.

    Args:
        __UpperCamelCase: the non-negative integer to inspect.

    Returns:
        The number of ``1`` bits in the binary representation.

    Raises:
        ValueError: if the input is not an ``int`` or is negative.

    >>> lowerCAmelCase(0)
    0
    >>> lowerCAmelCase(25)
    3
    >>> lowerCAmelCase(1 << 40)
    1
    """
    # The original assigned the counter to a throwaway name but read/returned
    # `count`, and tested `isinstance(x, x)` / an unbound `number` — all fixed
    # here by binding the parameter to consistent local names.
    if not isinstance(__UpperCamelCase, int) or __UpperCamelCase < 0:
        raise ValueError("""Input must be a non-negative integer""")
    number = __UpperCamelCase
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
65
1
"""simple docstring"""
# Lazy-import scaffolding for the GPT-NeoX model package: register optional
# submodules up front, then expose them through ``_LazyModule`` so the heavy
# backends (tokenizers, torch) are only imported on first attribute access.
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# NOTE(review): every assignment target in this file was flattened to the same
# name ``__UpperCAmelCase`` (each binding overwrites the previous one), and the
# final branch passes ``_import_structure`` to ``_LazyModule`` although no such
# name is defined anywhere here — confirm against the upstream ``transformers``
# gpt_neox ``__init__``, which builds an ``_import_structure`` dict instead.
__UpperCAmelCase = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}

try:
    # The fast tokenizer is only registered when the tokenizers backend exists.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ['GPTNeoXTokenizerFast']

try:
    # Torch-backed modeling classes are likewise optional.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoXForCausalLM',
        'GPTNeoXForQuestionAnswering',
        'GPTNeoXForSequenceClassification',
        'GPTNeoXForTokenClassification',
        'GPTNeoXLayer',
        'GPTNeoXModel',
        'GPTNeoXPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly...
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    # ...while at runtime the module object is replaced by a lazy proxy that
    # defers the expensive submodule imports until they are actually needed.
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
65
"""simple docstring"""
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid="isbn/0140328726"):
    """Fetch one JSON record from the Open Library REST API.

    Args:
        olid: an Open Library object path such as ``"isbn/0140328726"`` or
            ``"authors/OL34184A"`` (exactly one ``/`` after trimming).

    Returns:
        The decoded JSON record as a dict.

    Raises:
        ValueError: if ``olid`` does not contain exactly one ``/``.
    """
    new_olid = olid.strip().strip("""/""")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("""/""") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data):
    """Reduce a raw Open Library book record to a small, human-readable dict.

    Args:
        ol_book_data: a raw book record as returned by :func:`get_openlibrary_data`;
            must contain the keys listed in ``desired_keys`` below.

    Returns:
        A dict keyed by display labels; author references are resolved to names
        (one API call per author) and list values are joined with ``", "``.
    """
    desired_keys = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Each author entry is a reference ({"key": "/authors/..."}); resolve it to
    # the author's display name via a follow-up API call.
    data["""Authors"""] = [
        get_openlibrary_data(author["""key"""])["""name"""] for author in data["""Authors"""]
    ]
    data["""First sentence"""] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = """, """.join(value)
    return data


# Backward-compatible alias: the original file defined both functions under the
# single name ``lowerCAmelCase`` (the second definition shadowed the first), so
# the module attribute ultimately resolved to the summarizer.
lowerCAmelCase = summarize_book


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print('\n'.join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
65
1
"""simple docstring"""
# Slow integration test: checks that FlaxMT5's cross-entropy on a tiny
# input/target pair reproduces a pinned reference score for google/mt5-small.
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow

if is_flax_available():
    import optax
    from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowercase ( unittest.TestCase ):
    @slow
    def __lowercase ( self : Any ):
        '''simple docstring'''
        # NOTE(review): every result below is stored into the same flattened
        # name ``UpperCAmelCase__``, yet later lines read ``tokenizer``,
        # ``model``, ``labels``, ``logits``, ``loss``, ``mtf_score`` and
        # ``EXPECTED_SCORE`` — none of those names is bound here. Confirm
        # against the upstream FlaxMT5 integration test, where each line binds
        # the corresponding descriptive name.
        UpperCAmelCase__ : Any = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
        UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        UpperCAmelCase__ : Tuple = tokenizer("""Hello there""" ,return_tensors="""np""" ).input_ids
        UpperCAmelCase__ : List[str] = tokenizer("""Hi I am""" ,return_tensors="""np""" ).input_ids
        # Teacher forcing: shift the target ids right to build decoder inputs.
        UpperCAmelCase__ : Any = shift_tokens_right(A ,model.config.pad_token_id ,model.config.decoder_start_token_id )
        UpperCAmelCase__ : Any = model(A ,decoder_input_ids=A ).logits
        # Mean token-level cross-entropy against one-hot-encoded targets.
        UpperCAmelCase__ : List[Any] = optax.softmax_cross_entropy(A ,onehot(A ,logits.shape[-1] ) ).mean()
        # Scale by sequence length and negate to match the reference metric.
        UpperCAmelCase__ : Tuple = -(labels.shape[-1] * loss.item())
        UpperCAmelCase__ : List[str] = -8_4.9_1_2_7
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
65
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase="pt" ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = {"""add_prefix_space""": True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(""" """ ) else {} UpperCAmelCase__ : List[str] = padding_side return tokenizer( [line] , max_length=__UpperCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ): '''simple docstring''' UpperCAmelCase__ : str = input_ids.ne(__UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowercase ( __lowerCamelCase ): def __init__( self : Tuple ,A : List[Any] ,A : Union[str, Any] ,A : Any ,A : Optional[int] ,A : Union[str, Any]="train" ,A : Tuple=None ,A : Union[str, Any]=None ,A : Tuple=None ,A : int="" ,): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[Any] = Path(A ).joinpath(type_path + """.source""" ) UpperCAmelCase__ : List[str] = Path(A ).joinpath(type_path + """.target""" ) UpperCAmelCase__ : Dict = self.get_char_lens(self.src_file ) UpperCAmelCase__ : int = max_source_length UpperCAmelCase__ : List[str] = max_target_length assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}" 
UpperCAmelCase__ : Dict = tokenizer UpperCAmelCase__ : str = prefix if n_obs is not None: UpperCAmelCase__ : int = self.src_lens[:n_obs] UpperCAmelCase__ : Any = src_lang UpperCAmelCase__ : Any = tgt_lang def __len__( self : Optional[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] ,A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = index + 1 # linecache starts at 1 UpperCAmelCase__ : Tuple = self.prefix + linecache.getline(str(self.src_file ) ,A ).rstrip("""\n""" ) UpperCAmelCase__ : Dict = linecache.getline(str(self.tgt_file ) ,A ).rstrip("""\n""" ) assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer ,A ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right UpperCAmelCase__ : str = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,A ) else self.tokenizer ) UpperCAmelCase__ : Tuple = self.tokenizer.generator if isinstance(self.tokenizer ,A ) else self.tokenizer UpperCAmelCase__ : Tuple = encode_line(A ,A ,self.max_source_length ,"""right""" ) UpperCAmelCase__ : Dict = encode_line(A ,A ,self.max_target_length ,"""right""" ) UpperCAmelCase__ : Optional[Any] = source_inputs["""input_ids"""].squeeze() UpperCAmelCase__ : List[str] = target_inputs["""input_ids"""].squeeze() UpperCAmelCase__ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def __lowercase ( A : int ): '''simple docstring''' return [len(A ) for x in Path(A ).open().readlines()] def __lowercase ( self : List[Any] ,A : Any ): '''simple docstring''' UpperCAmelCase__ : int = torch.stack([x["""input_ids"""] for x in batch] ) UpperCAmelCase__ : Union[str, Any] = torch.stack([x["""attention_mask"""] for x in 
batch] ) UpperCAmelCase__ : Any = torch.stack([x["""decoder_input_ids"""] for x in batch] ) UpperCAmelCase__ : List[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,A ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ : Any = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,A ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ : str = trim_batch(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = trim_batch(A ,A ,attention_mask=A ) UpperCAmelCase__ : List[str] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return list(itertools.chain.from_iterable(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Dict = get_git_info() save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , """git_log.json""" ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=4 , **__UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase ) as f: return json.load(__UpperCamelCase ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = git.Repo(search_parent_directories=__UpperCamelCase ) UpperCAmelCase__ : List[str] = { """repo_id""": str(__UpperCamelCase ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return list(map(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' with 
open(__UpperCamelCase , """wb""" ) as f: return pickle.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' def remove_articles(__UpperCamelCase ): return re.sub(r"""\b(a|an|the)\b""" , """ """ , __UpperCamelCase ) def white_space_fix(__UpperCamelCase ): return " ".join(text.split() ) def remove_punc(__UpperCamelCase ): UpperCAmelCase__ : List[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__UpperCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = normalize_answer(__UpperCamelCase ).split() UpperCAmelCase__ : Dict = normalize_answer(__UpperCamelCase ).split() UpperCAmelCase__ : int = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase ) UpperCAmelCase__ : List[str] = sum(common.values() ) if num_same == 0: return 0 UpperCAmelCase__ : str = 1.0 * num_same / len(__UpperCamelCase ) UpperCAmelCase__ : Optional[int] = 1.0 * num_same / len(__UpperCamelCase ) UpperCAmelCase__ : Tuple = (2 * precision * recall) / (precision + recall) return fa def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' assert len(__UpperCamelCase ) == len(__UpperCamelCase ) UpperCAmelCase__ : Union[str, Any] = 0 for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ): em += exact_match_score(__UpperCamelCase , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: em /= len(__UpperCamelCase ) return {"em": em} def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' 
UpperCAmelCase__ : Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead UpperCAmelCase__ : str = """dropout_rate""" for p in extra_params: if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(__UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) continue UpperCAmelCase__ : Tuple = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p] setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) return hparams, config
65
1
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase="pt" ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = {"""add_prefix_space""": True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(""" """ ) else {} UpperCAmelCase__ : List[str] = padding_side return tokenizer( [line] , max_length=__UpperCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ): '''simple docstring''' UpperCAmelCase__ : str = input_ids.ne(__UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowercase ( __lowerCamelCase ): def __init__( self : Tuple ,A : List[Any] ,A : Union[str, Any] ,A : Any ,A : Optional[int] ,A : Union[str, Any]="train" ,A : Tuple=None ,A : Union[str, Any]=None ,A : Tuple=None ,A : int="" ,): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[Any] = Path(A ).joinpath(type_path + """.source""" ) UpperCAmelCase__ : List[str] = Path(A ).joinpath(type_path + """.target""" ) UpperCAmelCase__ : Dict = self.get_char_lens(self.src_file ) UpperCAmelCase__ : int = max_source_length UpperCAmelCase__ : List[str] = max_target_length assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}" 
UpperCAmelCase__ : Dict = tokenizer UpperCAmelCase__ : str = prefix if n_obs is not None: UpperCAmelCase__ : int = self.src_lens[:n_obs] UpperCAmelCase__ : Any = src_lang UpperCAmelCase__ : Any = tgt_lang def __len__( self : Optional[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] ,A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = index + 1 # linecache starts at 1 UpperCAmelCase__ : Tuple = self.prefix + linecache.getline(str(self.src_file ) ,A ).rstrip("""\n""" ) UpperCAmelCase__ : Dict = linecache.getline(str(self.tgt_file ) ,A ).rstrip("""\n""" ) assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer ,A ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right UpperCAmelCase__ : str = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,A ) else self.tokenizer ) UpperCAmelCase__ : Tuple = self.tokenizer.generator if isinstance(self.tokenizer ,A ) else self.tokenizer UpperCAmelCase__ : Tuple = encode_line(A ,A ,self.max_source_length ,"""right""" ) UpperCAmelCase__ : Dict = encode_line(A ,A ,self.max_target_length ,"""right""" ) UpperCAmelCase__ : Optional[Any] = source_inputs["""input_ids"""].squeeze() UpperCAmelCase__ : List[str] = target_inputs["""input_ids"""].squeeze() UpperCAmelCase__ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def __lowercase ( A : int ): '''simple docstring''' return [len(A ) for x in Path(A ).open().readlines()] def __lowercase ( self : List[Any] ,A : Any ): '''simple docstring''' UpperCAmelCase__ : int = torch.stack([x["""input_ids"""] for x in batch] ) UpperCAmelCase__ : Union[str, Any] = torch.stack([x["""attention_mask"""] for x in 
batch] ) UpperCAmelCase__ : Any = torch.stack([x["""decoder_input_ids"""] for x in batch] ) UpperCAmelCase__ : List[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,A ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ : Any = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,A ) else self.tokenizer.pad_token_id ) UpperCAmelCase__ : str = trim_batch(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = trim_batch(A ,A ,attention_mask=A ) UpperCAmelCase__ : List[str] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return list(itertools.chain.from_iterable(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Dict = get_git_info() save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , """git_log.json""" ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=4 , **__UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' with open(__UpperCamelCase ) as f: return json.load(__UpperCamelCase ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = git.Repo(search_parent_directories=__UpperCamelCase ) UpperCAmelCase__ : List[str] = { """repo_id""": str(__UpperCamelCase ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return list(map(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' with 
open(__UpperCamelCase , """wb""" ) as f: return pickle.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' def remove_articles(__UpperCamelCase ): return re.sub(r"""\b(a|an|the)\b""" , """ """ , __UpperCamelCase ) def white_space_fix(__UpperCamelCase ): return " ".join(text.split() ) def remove_punc(__UpperCamelCase ): UpperCAmelCase__ : List[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__UpperCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = normalize_answer(__UpperCamelCase ).split() UpperCAmelCase__ : Dict = normalize_answer(__UpperCamelCase ).split() UpperCAmelCase__ : int = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase ) UpperCAmelCase__ : List[str] = sum(common.values() ) if num_same == 0: return 0 UpperCAmelCase__ : str = 1.0 * num_same / len(__UpperCamelCase ) UpperCAmelCase__ : Optional[int] = 1.0 * num_same / len(__UpperCamelCase ) UpperCAmelCase__ : Tuple = (2 * precision * recall) / (precision + recall) return fa def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' assert len(__UpperCamelCase ) == len(__UpperCamelCase ) UpperCAmelCase__ : Union[str, Any] = 0 for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ): em += exact_match_score(__UpperCamelCase , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: em /= len(__UpperCamelCase ) return {"em": em} def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' 
UpperCAmelCase__ : Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead UpperCAmelCase__ : str = """dropout_rate""" for p in extra_params: if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(__UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) continue UpperCAmelCase__ : Tuple = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p] setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) return hparams, config
65
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaControlnetPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def __lowercase ( self : int ): '''simple docstring''' return 32 @property def __lowercase ( self : Dict ): '''simple docstring''' return self.time_input_dim @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __lowercase ( self : Any ): '''simple docstring''' return 100 @property def __lowercase ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Tuple = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": 
(self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase__ : int = UNetaDConditionModel(**A ) return model @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowercase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs ) return model def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.dummy_unet UpperCAmelCase__ : List[Any] = self.dummy_movq UpperCAmelCase__ : List[Any] = DDIMScheduler( num_train_timesteps=1_000 ,beta_schedule="""linear""" ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=A ,set_alpha_to_one=A ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=A ,) UpperCAmelCase__ : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __lowercase ( self : str ,A : Optional[Any] ,A : Any=0 ): '''simple docstring''' UpperCAmelCase__ : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A ) ).to(A ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( A ) # create hint UpperCAmelCase__ : int = floats_tensor((1, 3, 
64, 64) ,rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase__ : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase__ : Dict = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase__ : Dict = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = """cpu""" UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**A ) UpperCAmelCase__ : Optional[int] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(A ) ) UpperCAmelCase__ : Tuple = output.images UpperCAmelCase__ : Dict = pipe( **self.get_dummy_inputs(A ) ,return_dict=A ,)[0] UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Optional[int] = np.array( [0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def __lowercase ( self : Union[str, Any] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = load_numpy( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) UpperCAmelCase__ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) UpperCAmelCase__ : int = torch.from_numpy(np.array(A ) ).float() / 2_5_5.0 UpperCAmelCase__ : Union[str, Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) UpperCAmelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(A ) UpperCAmelCase__ : List[Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) UpperCAmelCase__ : int = pipeline.to(A ) pipeline.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[Any] = """A robot, 4k photo""" UpperCAmelCase__ : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = pipe_prior( A ,generator=A ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() UpperCAmelCase__ : List[str] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ : int = pipeline( image_embeds=A ,negative_image_embeds=A ,hint=A ,generator=A ,num_inference_steps=100 ,output_type="""np""" ,) UpperCAmelCase__ : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A ,A )
65
1
"""simple docstring"""
from __future__ import annotations


def lowerCAmelCase(number_of_bytes: int, partitions: int) -> list[str]:
    """Split ``number_of_bytes`` into ``partitions`` contiguous 1-indexed byte ranges.

    Returns a list of ``"start-end"`` strings. The final partition absorbs any
    division remainder so the ranges always cover exactly ``number_of_bytes`` bytes.

    Raises:
        ValueError: if ``partitions`` is not positive or exceeds ``number_of_bytes``.
    """
    # Fix: the original signature declared two parameters both named
    # ``__UpperCamelCase`` (a SyntaxError — duplicate argument) while the body
    # read the free names ``number_of_bytes`` / ``partitions``. Restoring real
    # parameter names makes the function compile and bind its inputs.
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""")
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # Last partition runs to the final byte so the remainder is not lost.
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
65
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

__UpperCAmelCase = logging.get_logger(__name__)


# NOTE(review): this whole module was machine-obfuscated — every class is named
# ``__lowercase``, methods shadow each other, and several ``def`` signatures
# declare the parameter ``A`` more than once (a SyntaxError). Comments below
# document intent; verify details against the pristine upstream file.
class __lowercase(__lowerCamelCase):
    # Composite config that pairs a vision encoder config with a text decoder config.
    snake_case_ = """vision-encoder-decoder"""
    snake_case_ = True

    def __init__(self: List[Any], **A: Union[str, Any]):
        '''simple docstring'''
        super().__init__(**A)
        # Both sub-configurations are mandatory; fail fast with a clear message.
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        # Pop the raw dicts and rebuild each sub-config via AutoConfig.for_model.
        UpperCAmelCase__: int = kwargs.pop("""encoder""")
        UpperCAmelCase__: int = encoder_config.pop("""model_type""")
        UpperCAmelCase__: str = kwargs.pop("""decoder""")
        UpperCAmelCase__: Dict = decoder_config.pop("""model_type""")
        UpperCAmelCase__: List[Any] = AutoConfig.for_model(A, **A)
        UpperCAmelCase__: Any = AutoConfig.for_model(A, **A)
        UpperCAmelCase__: Union[str, Any] = True

    @classmethod
    def __lowercase(cls: List[Any], A: PretrainedConfig, A: PretrainedConfig, **A: Tuple):
        '''simple docstring'''
        # Alternate constructor: build from already-instantiated sub-configs,
        # forcing the decoder into cross-attention decoder mode.
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""")
        UpperCAmelCase__: Union[str, Any] = True
        UpperCAmelCase__: List[Any] = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **A)

    def __lowercase(self: Optional[int]):
        '''simple docstring'''
        # Serialize to a plain dict, expanding the nested encoder/decoder configs.
        UpperCAmelCase__: List[Any] = copy.deepcopy(self.__dict__)
        UpperCAmelCase__: Dict = self.encoder.to_dict()
        UpperCAmelCase__: Any = self.decoder.to_dict()
        UpperCAmelCase__: Dict = self.__class__.model_type
        return output


class __lowercase(__lowerCamelCase):
    # ONNX export config for the encoder half (image inputs).
    snake_case_ = version.parse("""1.11""")

    @property
    def __lowercase(self: Optional[int]):
        '''simple docstring'''
        # Dynamic-axis spec for the pixel input tensor.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def __lowercase(self: List[Any]):
        '''simple docstring'''
        # Numeric tolerance used when validating the exported model.
        return 1e-4

    @property
    def __lowercase(self: List[Any]):
        '''simple docstring'''
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}})


class __lowercase(__lowerCamelCase):
    # ONNX export config for the decoder half (text inputs + encoder states).
    @property
    def __lowercase(self: Any):
        '''simple docstring'''
        UpperCAmelCase__: int = OrderedDict()
        UpperCAmelCase__: Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        UpperCAmelCase__: Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        UpperCAmelCase__: List[str] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs

    def __lowercase(self: Dict, A: "PreTrainedTokenizerBase", A: int = -1, A: int = -1, A: bool = False, A: Optional["TensorType"] = None):
        '''simple docstring'''
        # Build dummy decoder inputs: tokenizer-generated ids reshaped into the
        # names the decoder expects, plus zeroed encoder hidden states.
        import torch

        UpperCAmelCase__: int = OrderedDict()
        UpperCAmelCase__: List[Any] = super().generate_dummy_inputs(
            A, batch_size=A, seq_length=A, is_pair=A, framework=A
        )
        UpperCAmelCase__, UpperCAmelCase__: int = dummy_input["""input_ids"""].shape
        # Encoder-state dummy shape: (batch, encoder_sequence, encoder_hidden_size).
        UpperCAmelCase__: int = (batch, encoder_sequence, self._config.encoder_hidden_size)
        UpperCAmelCase__: Tuple = dummy_input.pop("""input_ids""")
        UpperCAmelCase__: Optional[int] = dummy_input.pop("""attention_mask""")
        UpperCAmelCase__: Dict = torch.zeros(A)
        return common_inputs


class __lowercase(__lowerCamelCase):
    # Umbrella ONNX config that hands out encoder/decoder sub-configs.
    @property
    def __lowercase(self: str):
        '''simple docstring'''
        pass

    def __lowercase(self: Any, A: PretrainedConfig):
        '''simple docstring'''
        return VisionEncoderDecoderEncoderOnnxConfig(A)

    def __lowercase(self: Dict, A: PretrainedConfig, A: PretrainedConfig, A: str = "default"):
        '''simple docstring'''
        # The decoder export needs to know the encoder's hidden size to shape
        # the cross-attention dummy inputs.
        UpperCAmelCase__: List[str] = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(A, A)
65
1
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __UpperCAmelCase = '\\n\n' __UpperCAmelCase = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' __UpperCAmelCase = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def __lowercase ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) ,reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] ,) def __lowercase ( self : str ,A : List[str] ,A : str ,A : int = 16 ,A : bool = True ,A : Union[str, Any]=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase__ : str = """cuda""" else: UpperCAmelCase__ : Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu""" UpperCAmelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained(A ) UpperCAmelCase__ : Dict = model.to(A ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(A ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase__ : Dict = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(A ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase__ : int = model.config.max_length - 1 else: UpperCAmelCase__ : List[Any] = model.config.max_length UpperCAmelCase__ : Tuple = tokenizer( A ,add_special_tokens=A ,padding=A ,truncation=A ,max_length=A ,return_tensors="""pt""" ,return_attention_mask=A ,).to(A ) UpperCAmelCase__ : Optional[Any] = encodings["""input_ids"""] UpperCAmelCase__ : Dict = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. 
Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : str = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 ,len(A ) ,A ) ): UpperCAmelCase__ : List[Any] = min(start_index + batch_size ,len(A ) ) UpperCAmelCase__ : Optional[int] = encoded_texts[start_index:end_index] UpperCAmelCase__ : int = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase__ : str = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A ) UpperCAmelCase__ : Tuple = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 ) UpperCAmelCase__ : Any = torch.cat( [torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(A ), attn_mask] ,dim=1 ) UpperCAmelCase__ : Tuple = encoded_batch with torch.no_grad(): UpperCAmelCase__ : str = model(A ,attention_mask=A ).logits UpperCAmelCase__ : Optional[int] = out_logits[..., :-1, :].contiguous() UpperCAmelCase__ : List[Any] = labels[..., 1:].contiguous() UpperCAmelCase__ : Optional[Any] = attn_mask[..., 1:].contiguous() UpperCAmelCase__ : int = torch.expa( (loss_fct(shift_logits.transpose(1 ,2 ) ,A ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(A )}
65
"""simple docstring"""
import requests


def lowerCAmelCase(message_body, slack_url):
    """Post ``message_body`` to a Slack incoming-webhook URL.

    Args:
        message_body: text to send as the ``text`` field of the JSON payload.
        slack_url: webhook endpoint provided by Slack.

    Raises:
        ValueError: if Slack responds with a non-200 status code.
    """
    # Fix: the original declared two parameters both named ``__UpperCamelCase``
    # (a SyntaxError) while the body read the unbound name ``message_body``;
    # real parameter names restore compilable, working code.
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url, json={"""text""": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            """Request to slack returned an error """
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    # Fix: the guard previously called the undefined name ``send_slack_message``.
    lowerCAmelCase('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
65
1
"""simple docstring"""
import socket


def lowerCAmelCase():
    """Connect to a local file server on port 12312, send a greeting, and
    stream the server's reply into ``Received_file`` (1 KiB chunks until EOF)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"""Hello server!""")
    with open("""Received_file""", """wb""") as out_file:
        print("""File opened""")
        print("""Receiving data...""")
        while True:
            data = sock.recv(1024)
            # Empty recv() means the peer closed the connection.
            if not data:
                break
            # Fix: original wrote the undefined name ``__UpperCamelCase``
            # (NameError at runtime); the received chunk is ``data``.
            out_file.write(data)
    print("""Successfully received the file""")
    sock.close()
    print("""Connection closed""")


if __name__ == "__main__":
    # Fix: the guard previously called ``main()``, which is undefined here.
    lowerCAmelCase()
65
"""simple docstring"""
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): obfuscation renamed every method to ``__lowercase`` (later
# defs shadow earlier ones at class-creation time) and left references to
# names like ``A``/``kwargs``/``tokenizer`` that no longer bind; verify
# against the pristine upstream test file before running.
class __lowercase(__lowerCamelCase, unittest.TestCase):
    """Tokenization tests for the CTRL BPE tokenizer, driven by TokenizerTesterMixin."""

    snake_case_ = CTRLTokenizer  # tokenizer class under test
    snake_case_ = False
    snake_case_ = False

    def __lowercase(self: List[str]):
        '''setUp: write a tiny vocab + merges fixture into the temp dir.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase__: List[Any] = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        UpperCAmelCase__: Optional[int] = dict(zip(A, range(len(A))))
        # BPE merge rules; first line is the subword-nmt version header.
        UpperCAmelCase__: List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        UpperCAmelCase__: int = {"""unk_token""": """<unk>"""}
        UpperCAmelCase__: Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        UpperCAmelCase__: str = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(A) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(A))

    def __lowercase(self: int, **A: Dict):
        '''Build a tokenizer from the fixture dir, merging in the special-token map.'''
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **A)

    def __lowercase(self: List[Any], A: Any):
        '''Return an (input, expected output) text pair for the mixin's round-trip tests.'''
        UpperCAmelCase__: Union[str, Any] = """adapt react readapt apt"""
        UpperCAmelCase__: Any = """adapt react readapt apt"""
        return input_text, output_text

    def __lowercase(self: Union[str, Any]):
        '''Full tokenize + convert_tokens_to_ids check against hand-computed BPE output.'''
        UpperCAmelCase__: Union[str, Any] = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        UpperCAmelCase__: Tuple = """adapt react readapt apt"""
        # Expected BPE segmentation given the fixture merges above.
        UpperCAmelCase__: Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        UpperCAmelCase__: Dict = tokenizer.tokenize(A)
        self.assertListEqual(A, A)
        UpperCAmelCase__: Any = tokens + [tokenizer.unk_token]
        UpperCAmelCase__: Dict = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A), A)
65
1
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP requires torch and transformers >= 4.25.0; probe before importing
    # the real pipeline implementations.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects that raise an informative
    # ImportError-style message when actually used.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
65
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps submodule name -> public names it provides.
__UpperCAmelCase = {
    'configuration_bridgetower': [
        'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BridgeTowerConfig',
        'BridgeTowerTextConfig',
        'BridgeTowerVisionConfig',
    ],
    'processing_bridgetower': ['BridgeTowerProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: the original rebound the whole name (``__UpperCAmelCase = [...]``),
    # discarding the import structure built above; register the optional
    # vision entry under its submodule key instead.
    __UpperCAmelCase['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: same clobbering bug for the torch-only modeling entries.
    __UpperCAmelCase['modeling_bridgetower'] = [
        'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BridgeTowerForContrastiveLearning',
        'BridgeTowerForImageAndTextRetrieval',
        'BridgeTowerForMaskedLM',
        'BridgeTowerModel',
        'BridgeTowerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; runtime stays lazy.
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    # Fix: the original passed the undefined name ``_import_structure`` here
    # (NameError at import time); use the structure dict defined above.
    # NOTE(review): upstream convention assigns this to ``sys.modules[__name__]``;
    # the obfuscated original assigned to a module-level name — preserved here.
    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], __UpperCAmelCase)
65
1
"""simple docstring"""
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


# NOTE(review): obfuscation collapsed distinct local names to ``A`` /
# ``UpperCAmelCase__``, so several assignments below are shadowed or read
# unbound names (e.g. ``pretrained``, ``use_timm``); compare with the
# pristine upstream TimmBackbone before trusting the control flow.
class __lowercase(__lowerCamelCase, __lowerCamelCase):
    # Adapter exposing a timm-created model through the transformers Backbone API.
    snake_case_ = """pixel_values"""
    snake_case_ = False
    snake_case_ = TimmBackboneConfig

    def __init__(self: Union[str, Any], A: Optional[int], **A: Union[str, Any]):
        '''Validate the config and instantiate the underlying timm model.'''
        requires_backends(self, """timm""")
        super().__init__(A)
        UpperCAmelCase__: List[Any] = config
        # A timm model name is mandatory and must exist in timm's registry.
        if config.backbone is None:
            raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        # timm backbones select stages by index, not by feature name.
        if hasattr(A, """out_features""") and config.out_features is not None:
            raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""")
        UpperCAmelCase__: List[Any] = getattr(A, """use_pretrained_backbone""", A)
        if pretrained is None:
            raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""")
        # We just take the final layer by default. This matches the default for the transformers models.
        UpperCAmelCase__: List[str] = config.out_indices if getattr(A, """out_indices""", A) is not None else (-1,)
        UpperCAmelCase__: Dict = timm.create_model(
            config.backbone,
            pretrained=A,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=A,
            **A,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        UpperCAmelCase__: Union[str, Any] = self._backbone.return_layers
        UpperCAmelCase__: int = {layer["""module"""]: str(A) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(A)

    @classmethod
    def __lowercase(cls: int, A: Optional[Any], *A: int, **A: Optional[Any]):
        '''from_pretrained-style constructor: translate kwargs into a TimmBackboneConfig.'''
        requires_backends(cls, ["""vision""", """timm"""])
        from ...models.timm_backbone import TimmBackboneConfig

        UpperCAmelCase__: str = kwargs.pop("""config""", TimmBackboneConfig())
        UpperCAmelCase__: Optional[Any] = kwargs.pop("""use_timm_backbone""", A)
        if not use_timm:
            raise ValueError("""use_timm_backbone must be True for timm backbones""")
        # Explicit kwargs win over the (possibly default) config values.
        UpperCAmelCase__: Union[str, Any] = kwargs.pop("""num_channels""", config.num_channels)
        UpperCAmelCase__: Tuple = kwargs.pop("""features_only""", config.features_only)
        UpperCAmelCase__: List[str] = kwargs.pop("""use_pretrained_backbone""", config.use_pretrained_backbone)
        UpperCAmelCase__: Union[str, Any] = kwargs.pop("""out_indices""", config.out_indices)
        UpperCAmelCase__: Any = TimmBackboneConfig(
            backbone=A,
            num_channels=A,
            features_only=A,
            use_pretrained_backbone=A,
            out_indices=A,
        )
        return super()._from_config(A, **A)

    def __lowercase(self: Optional[int], A: Optional[Any]):
        '''Intentional no-op (weight init is handled by timm).'''
        pass

    def __lowercase(self: Optional[Any], A: Optional[Any], A: Optional[Any] = None, A: Any = None, A: int = None, **A: int):
        '''Forward pass: run the timm backbone and package outputs as a BackboneOutput.'''
        UpperCAmelCase__: str = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCAmelCase__: str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCAmelCase__: Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
        # timm feature extractors do not expose attention maps.
        if output_attentions:
            raise ValueError("""Cannot output attentions for timm backbones at the moment""")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            UpperCAmelCase__: Tuple = self._all_layers
            UpperCAmelCase__: Tuple = self._backbone(A, **A)
            UpperCAmelCase__: Tuple = self._return_layers
            UpperCAmelCase__: Union[str, Any] = tuple(hidden_states[i] for i in self.out_indices)
        else:
            UpperCAmelCase__: List[str] = self._backbone(A, **A)
            UpperCAmelCase__: Any = None
            UpperCAmelCase__: List[Any] = tuple(A)
            UpperCAmelCase__: Dict = tuple(A) if hidden_states is not None else None
        if not return_dict:
            # Tuple output path: (feature_maps,) plus hidden states when requested.
            UpperCAmelCase__: str = (feature_maps,)
            if output_hidden_states:
                UpperCAmelCase__: Optional[Any] = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=A, hidden_states=A, attentions=A)
65
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """is_longer"""] def __init__( self : str ,A : Union[str, Any]=64 ,A : Tuple=48_000 ,A : Dict=480 ,A : List[str]=10 ,A : str=1_024 ,A : Any=0.0 ,A : Optional[int]=False ,A : float = 0 ,A : float = 14_000 ,A : int = None ,A : str = "fusion" ,A : str = "repeatpad" ,**A : List[Any] ,): '''simple docstring''' super().__init__( feature_size=A ,sampling_rate=A ,padding_value=A ,return_attention_mask=A ,**A ,) UpperCAmelCase__ : List[Any] = top_db UpperCAmelCase__ : Union[str, Any] = truncation UpperCAmelCase__ : Optional[int] = padding UpperCAmelCase__ : List[Any] = fft_window_size UpperCAmelCase__ : Optional[Any] = (fft_window_size >> 1) + 1 UpperCAmelCase__ : Any = hop_length UpperCAmelCase__ : List[str] = max_length_s UpperCAmelCase__ : List[Any] = max_length_s * sampling_rate UpperCAmelCase__ : List[Any] = sampling_rate UpperCAmelCase__ : Optional[int] = frequency_min UpperCAmelCase__ : Tuple = frequency_max UpperCAmelCase__ : List[str] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm=A ,mel_scale="""htk""" ,) UpperCAmelCase__ : str = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm="""slaney""" ,mel_scale="""slaney""" ,) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Tuple = self.__class__.__name__ if "mel_filters" in 
output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __lowercase ( self : List[str] ,A : np.array ,A : Optional[np.array] = None ): '''simple docstring''' UpperCAmelCase__ : Dict = spectrogram( A ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=A ,log_mel="""dB""" ,) return log_mel_spectrogram.T def __lowercase ( self : Optional[Any] ,A : Union[str, Any] ,A : int ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : List[str] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : int = [0] # randomly choose index for each part UpperCAmelCase__ : Tuple = np.random.choice(ranges[0] ) UpperCAmelCase__ : Tuple = np.random.choice(ranges[1] ) UpperCAmelCase__ : str = np.random.choice(ranges[2] ) UpperCAmelCase__ : List[str] = mel[idx_front : idx_front + chunk_frames, :] UpperCAmelCase__ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] UpperCAmelCase__ : Dict = mel[idx_back : idx_back + chunk_frames, :] UpperCAmelCase__ : Optional[Any] = torch.tensor(mel[None, None, :] ) UpperCAmelCase__ : int = torch.nn.functional.interpolate( A ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=A ) UpperCAmelCase__ : Dict = mel_shrink[0][0].numpy() UpperCAmelCase__ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def __lowercase ( self : Any ,A : np.array ,A : Optional[int] ,A : Any ,A : Tuple ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": UpperCAmelCase__ : int = True # random crop to max_length (for compatibility) -> this should be handled by self.pad 
UpperCAmelCase__ : str = len(A ) - max_length UpperCAmelCase__ : Optional[Any] = np.random.randint(0 ,overflow + 1 ) UpperCAmelCase__ : Optional[int] = waveform[idx : idx + max_length] UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": UpperCAmelCase__ : Tuple = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCAmelCase__ : int = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCAmelCase__ : List[Any] = np.stack([mel, mel, mel, mel] ,axis=0 ) UpperCAmelCase__ : Any = False else: UpperCAmelCase__ : Union[str, Any] = self._random_mel_fusion(A ,A ,A ) UpperCAmelCase__ : List[str] = True else: raise NotImplementedError(f"data_truncating {truncation} not implemented" ) else: UpperCAmelCase__ : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCAmelCase__ : str = int(max_length / len(A ) ) UpperCAmelCase__ : int = np.stack(np.tile(A ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": UpperCAmelCase__ : List[Any] = int(max_length / len(A ) ) UpperCAmelCase__ : str = np.stack(np.tile(A ,A ) ) UpperCAmelCase__ : Optional[Any] = np.pad(A ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": UpperCAmelCase__ : int = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : str = None ,A : Optional[str] = None ,A : Optional[int] = None ,A : Optional[int] = None ,A : Optional[Union[str, TensorType]] = None ,**A : List[str] ,): '''simple docstring''' UpperCAmelCase__ : Optional[int] = truncation if truncation is not None else self.truncation UpperCAmelCase__ : Dict = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[int] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : List[str] = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Any = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [np.asarray(A )] # convert to mel spectrogram, truncate and pad if needed. UpperCAmelCase__ : Tuple = [ self._get_input_mel(A ,max_length if max_length else self.nb_max_samples ,A ,A ) for waveform in raw_speech ] UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Tuple = [] for mel, longer in padded_inputs: input_mel.append(A ) is_longer.append(A ) if truncation == "fusion" and sum(A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCAmelCase__ : List[str] = np.random.randint(0 ,len(A ) ) UpperCAmelCase__ : int = True if isinstance(input_mel[0] ,A ): UpperCAmelCase__ : Tuple = [np.asarray(A ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool UpperCAmelCase__ : List[str] = [[longer] for longer in is_longer] UpperCAmelCase__ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} UpperCAmelCase__ : str = BatchFeature(A ) if return_tensors is not None: UpperCAmelCase__ : int = input_features.convert_to_tensors(A ) return input_features
65
1
"""simple docstring""" from __future__ import annotations from typing import Any class __lowercase : def __init__( self : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = num_of_nodes UpperCAmelCase__ : list[list[int]] = [] UpperCAmelCase__ : dict[int, int] = {} def __lowercase ( self : Any ,A : int ,A : int ,A : int ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def __lowercase ( self : Tuple ,A : int ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def __lowercase ( self : List[Any] ,A : int ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: UpperCAmelCase__ : List[Any] = self.find_component(A ) def __lowercase ( self : List[str] ,A : list[int] ,A : int ,A : int ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: UpperCAmelCase__ : Any = v_node component_size[v_node] += component_size[u_node] self.set_component(A ) elif component_size[u_node] >= component_size[v_node]: UpperCAmelCase__ : List[str] = self.find_component(A ) component_size[u_node] += component_size[v_node] self.set_component(A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = [] UpperCAmelCase__ : str = 0 UpperCAmelCase__ : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCAmelCase__ : Tuple = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = edge UpperCAmelCase__ : Tuple = self.m_component[u] UpperCAmelCase__ : Optional[int] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): 
UpperCAmelCase__ : Optional[int] = [u, v, w] for edge in minimum_weight_edge: if isinstance(A ,A ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = edge UpperCAmelCase__ : str = self.m_component[u] UpperCAmelCase__ : List[str] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(A ,A ,A ) print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" ) num_of_components -= 1 UpperCAmelCase__ : Union[str, Any] = [-1] * self.m_num_of_nodes print(f"The total weight of the minimal spanning tree is: {mst_weight}" ) def lowerCAmelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
65
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] ,A : Union[str, Any] ,A : Dict=7 ,A : Optional[int]=3 ,A : List[str]=18 ,A : Union[str, Any]=30 ,A : Tuple=400 ,A : Dict=True ,A : List[str]=None ,A : str=True ,A : Optional[Any]=False ,A : Optional[Any]=True ,A : List[str]=True ,A : Optional[int]=[0.5, 0.5, 0.5] ,A : List[str]=[0.5, 0.5, 0.5] ,): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : Union[str, Any] = image_size UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : Optional[int] = max_resolution UpperCAmelCase__ : str = do_resize UpperCAmelCase__ : Tuple = size if size is not None else {"""height""": 18, """width""": 20} UpperCAmelCase__ : List[str] = do_thumbnail UpperCAmelCase__ : Optional[int] = do_align_axis UpperCAmelCase__ : Union[str, Any] = do_pad UpperCAmelCase__ : Tuple = do_normalize UpperCAmelCase__ : Optional[Any] = image_mean UpperCAmelCase__ : List[Any] = image_std def __lowercase ( self : Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = DonutImageProcessor if is_vision_available() else None def __lowercase ( self : str 
): '''simple docstring''' UpperCAmelCase__ : Tuple = DonutImageProcessingTester(self ) @property def __lowercase ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""do_resize""" ) ) self.assertTrue(hasattr(A ,"""size""" ) ) self.assertTrue(hasattr(A ,"""do_thumbnail""" ) ) self.assertTrue(hasattr(A ,"""do_align_long_axis""" ) ) self.assertTrue(hasattr(A ,"""do_pad""" ) ) self.assertTrue(hasattr(A ,"""do_normalize""" ) ) self.assertTrue(hasattr(A ,"""image_mean""" ) ) self.assertTrue(hasattr(A ,"""image_std""" ) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 20} ) UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) ) self.assertEqual(image_processor.size ,{"""height""": 84, """width""": 42} ) def __lowercase ( self : Dict ): '''simple docstring''' pass @is_flaky() def __lowercase ( self : int ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase__ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( 
encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : Tuple = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) @is_flaky() def __lowercase ( self : List[str] ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : Optional[int] = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) @is_flaky() def __lowercase ( self : Any ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input UpperCAmelCase__ : List[Any] = 
image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : List[Any] = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,)
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if len(__UpperCamelCase ) < 2: return collection def circle_sort_util(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> bool: UpperCAmelCase__ : Any = False if low == high: return swapped UpperCAmelCase__ : List[str] = low UpperCAmelCase__ : Optional[int] = high while left < right: if collection[left] > collection[right]: UpperCAmelCase__ , UpperCAmelCase__ : int = ( collection[right], collection[left], ) UpperCAmelCase__ : Optional[Any] = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: UpperCAmelCase__ , UpperCAmelCase__ : Dict = ( collection[right + 1], collection[left], ) UpperCAmelCase__ : List[Any] = True UpperCAmelCase__ : Union[str, Any] = low + int((high - low) / 2 ) UpperCAmelCase__ : Union[str, Any] = circle_sort_util(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) UpperCAmelCase__ : Optional[Any] = circle_sort_util(__UpperCamelCase , mid + 1 , __UpperCamelCase ) return swapped or left_swap or right_swap UpperCAmelCase__ : List[str] = True while is_not_sorted is True: UpperCAmelCase__ : List[Any] = circle_sort_util(__UpperCamelCase , 0 , len(__UpperCamelCase ) - 1 ) return collection if __name__ == "__main__": __UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip() __UpperCAmelCase = [int(item) for item in user_input.split(',')] print(circle_sort(unsorted))
65
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class __lowercase ( __lowerCamelCase ): snake_case_ = """open-llama""" def __init__( self : Dict ,A : str=100_000 ,A : str=4_096 ,A : Optional[Any]=11_008 ,A : Tuple=32 ,A : str=32 ,A : Optional[int]="silu" ,A : List[Any]=2_048 ,A : str=0.0_2 ,A : Optional[int]=1e-6 ,A : int=True ,A : Tuple=0 ,A : str=1 ,A : Any=2 ,A : Optional[Any]=False ,A : int=True ,A : Any=0.1 ,A : Optional[Any]=0.1 ,A : Optional[Any]=True ,A : Union[str, Any]=True ,A : Tuple=None ,**A : Optional[int] ,): '''simple docstring''' UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : Optional[int] = rms_norm_eps UpperCAmelCase__ : Any = use_cache UpperCAmelCase__ : Optional[Any] = kwargs.pop( """use_memorry_efficient_attention""" ,A ) UpperCAmelCase__ : Any = hidden_dropout_prob UpperCAmelCase__ : str = attention_dropout_prob UpperCAmelCase__ : Optional[int] = use_stable_embedding UpperCAmelCase__ : Tuple = shared_input_output_embedding UpperCAmelCase__ : Tuple = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,tie_word_embeddings=A ,**A ,) def __lowercase ( self : Optional[Any] ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,A ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f"got {self.rope_scaling}" ) 
UpperCAmelCase__ : List[Any] = self.rope_scaling.get("""type""" ,A ) UpperCAmelCase__ : int = self.rope_scaling.get("""factor""" ,A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(A ,A ) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
65
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} __UpperCAmelCase = { 'vocab_file': { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json', 'roberta-large-openai-detector': ( 'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json' ), }, 'merges_file': { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt', 'roberta-large-openai-detector': ( 'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt' ), }, 'tokenizer_file': { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json', 'distilroberta-base': 
'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json', 'roberta-base-openai-detector': ( 'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json' ), 'roberta-large-openai-detector': ( 'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json' ), }, } __UpperCAmelCase = { 'roberta-base': 512, 'roberta-large': 512, 'roberta-large-mnli': 512, 'distilroberta-base': 512, 'roberta-base-openai-detector': 512, 'roberta-large-openai-detector': 512, } class __lowercase ( __lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["""input_ids""", """attention_mask"""] snake_case_ = RobertaTokenizer def __init__( self : List[str] ,A : Optional[int]=None ,A : Tuple=None ,A : int=None ,A : Tuple="replace" ,A : Tuple="<s>" ,A : Dict="</s>" ,A : Optional[int]="</s>" ,A : Dict="<s>" ,A : List[Any]="<unk>" ,A : Optional[Any]="<pad>" ,A : List[str]="<mask>" ,A : List[Any]=False ,A : int=True ,**A : Optional[int] ,): '''simple docstring''' super().__init__( A ,A ,tokenizer_file=A ,errors=A ,bos_token=A ,eos_token=A ,sep_token=A ,cls_token=A ,unk_token=A ,pad_token=A ,mask_token=A ,add_prefix_space=A ,trim_offsets=A ,**A ,) UpperCAmelCase__ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,A ) != add_prefix_space: UpperCAmelCase__ : Union[str, Any] = getattr(A ,pre_tok_state.pop("""type""" ) ) UpperCAmelCase__ : Dict = add_prefix_space UpperCAmelCase__ : Dict = pre_tok_class(**A ) UpperCAmelCase__ : List[Any] = add_prefix_space UpperCAmelCase__ : Dict = """post_processor""" UpperCAmelCase__ : Any = getattr(self.backend_tokenizer ,A ,A ) if tokenizer_component_instance: UpperCAmelCase__ : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in 
state: UpperCAmelCase__ : List[Any] = tuple(state["""sep"""] ) if "cls" in state: UpperCAmelCase__ : List[str] = tuple(state["""cls"""] ) UpperCAmelCase__ : Dict = False if state.get("""add_prefix_space""" ,A ) != add_prefix_space: UpperCAmelCase__ : Tuple = add_prefix_space UpperCAmelCase__ : str = True if state.get("""trim_offsets""" ,A ) != trim_offsets: UpperCAmelCase__ : Tuple = trim_offsets UpperCAmelCase__ : str = True if changes_to_apply: UpperCAmelCase__ : List[str] = getattr(A ,state.pop("""type""" ) ) UpperCAmelCase__ : List[Any] = component_class(**A ) setattr(self.backend_tokenizer ,A ,A ) @property def __lowercase ( self : Dict ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def __lowercase ( self : Dict ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else value UpperCAmelCase__ : Union[str, Any] = value def __lowercase ( self : int ,*A : Optional[Any] ,**A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = kwargs.get("""is_split_into_words""" ,A ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*A ,**A ) def __lowercase ( self : Union[str, Any] ,*A : Optional[Any] ,**A : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = kwargs.get("""is_split_into_words""" ,A ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*A ,**A ) def __lowercase ( self : List[str] ,A : str ,A : Optional[str] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self._tokenizer.model.save(A ,name=A ) return tuple(A ) def __lowercase ( self : Optional[int] ,A : List[Any] ,A : str=None ): '''simple docstring''' UpperCAmelCase__ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowercase ( self : List[str] ,A : List[int] ,A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [self.sep_token_id] UpperCAmelCase__ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
65
"""simple docstring""" from collections.abc import Callable class __lowercase : def __init__( self : Tuple ,A : Callable | None = None ): '''simple docstring''' # Stores actual heap items. UpperCAmelCase__ : list = [] # Stores indexes of each item for supporting updates and deletion. UpperCAmelCase__ : dict = {} # Stores current size of heap. UpperCAmelCase__ : Any = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. UpperCAmelCase__ : int = key or (lambda A : x) def __lowercase ( self : Union[str, Any] ,A : int ): '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def __lowercase ( self : Tuple ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowercase ( self : Any ,A : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowercase ( self : List[Any] ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. 
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.arr[j], self.arr[i] def __lowercase ( self : Optional[int] ,A : int ,A : int ): '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def __lowercase ( self : Optional[int] ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._left(A ) UpperCAmelCase__ : Dict = self._right(A ) UpperCAmelCase__ : Optional[int] = i if left is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = left if right is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = right return valid_parent def __lowercase ( self : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._parent(A ) while parent is not None and not self._cmp(A ,A ): self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : int = parent, self._parent(A ) def __lowercase ( self : str ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = self._get_valid_parent(A ) while valid_parent != index: self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = valid_parent, self._get_valid_parent(A ) def __lowercase ( self : Optional[Any] ,A : int ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Tuple = self.pos_map[item] UpperCAmelCase__ : Dict = [item, self.key(A )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : List[Any] ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Any = self.pos_map[item] del self.pos_map[item] UpperCAmelCase__ : Dict = self.arr[self.size - 1] UpperCAmelCase__ : List[Any] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : str ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : Dict = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(A )] ) else: UpperCAmelCase__ : List[str] = [item, self.key(A )] UpperCAmelCase__ : Union[str, Any] = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowercase ( self : str ): '''simple docstring''' return self.arr[0] if self.size else None def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def lowerCAmelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
65
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json', # See all GLPN models at https://huggingface.co/models?filter=glpn } class __lowercase ( __lowerCamelCase ): snake_case_ = """glpn""" def __init__( self : List[Any] ,A : int=3 ,A : Tuple=4 ,A : List[str]=[2, 2, 2, 2] ,A : Union[str, Any]=[8, 4, 2, 1] ,A : List[str]=[32, 64, 160, 256] ,A : str=[7, 3, 3, 3] ,A : int=[4, 2, 2, 2] ,A : Optional[Any]=[1, 2, 5, 8] ,A : Optional[Any]=[4, 4, 4, 4] ,A : List[Any]="gelu" ,A : Optional[int]=0.0 ,A : int=0.0 ,A : int=0.0_2 ,A : Tuple=0.1 ,A : Any=1e-6 ,A : Any=64 ,A : List[Any]=10 ,A : Any=-1 ,**A : int ,): '''simple docstring''' super().__init__(**A ) UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Tuple = num_encoder_blocks UpperCAmelCase__ : Any = depths UpperCAmelCase__ : Union[str, Any] = sr_ratios UpperCAmelCase__ : Any = hidden_sizes UpperCAmelCase__ : List[str] = patch_sizes UpperCAmelCase__ : Tuple = strides UpperCAmelCase__ : Tuple = mlp_ratios UpperCAmelCase__ : Optional[Any] = num_attention_heads UpperCAmelCase__ : Union[str, Any] = hidden_act UpperCAmelCase__ : Dict = hidden_dropout_prob UpperCAmelCase__ : Dict = attention_probs_dropout_prob UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Dict = drop_path_rate UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Tuple = decoder_hidden_size UpperCAmelCase__ : Optional[int] = max_depth UpperCAmelCase__ : Dict = head_in_index
65
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """attention_mask"""] def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,): '''simple docstring''' super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A ) UpperCAmelCase__ : str = feature_size UpperCAmelCase__ : int = sampling_rate UpperCAmelCase__ : int = padding_value UpperCAmelCase__ : Dict = hop_length UpperCAmelCase__ : int = win_length UpperCAmelCase__ : Dict = frame_signal_scale UpperCAmelCase__ : Dict = preemphasis_coeff UpperCAmelCase__ : str = mel_floor UpperCAmelCase__ : Any = normalize_means UpperCAmelCase__ : str = normalize_vars UpperCAmelCase__ : int = win_function UpperCAmelCase__ : List[Any] = return_attention_mask UpperCAmelCase__ : str = win_length * sampling_rate // 1_000 UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000 UpperCAmelCase__ : int = optimal_fft_length(self.sample_size ) UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1 def __lowercase ( self : Union[str, Any] ,A : np.array ): '''simple docstring''' if self.win_function == "hamming_window": UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A ) else: UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ) UpperCAmelCase__ : Union[str, Any] = 
mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,) UpperCAmelCase__ : Optional[Any] = spectrogram( one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,) return msfc_features.T def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ): '''simple docstring''' # make sure we normalize float32 arrays if self.normalize_means: UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 ) UpperCAmelCase__ : Any = np.subtract(A ,A ) if self.normalize_vars: UpperCAmelCase__ : str = x[:input_length].std(axis=0 ) UpperCAmelCase__ : Optional[int] = np.divide(A ,A ) if input_length < x.shape[0]: UpperCAmelCase__ : int = padding_value # make sure array is in float32 UpperCAmelCase__ : str = x.astype(np.floataa ) return x def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ): '''simple docstring''' UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )] def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : Any = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [raw_speech] # extract fbank features UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech] # convert into correct format for padding UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} ) UpperCAmelCase__ : Optional[Any] = self.pad( A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,) # make sure list is in array format UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] ,A ): UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features] UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: 
UpperCAmelCase__ : Union[str, Any] = ( np.array(A ,dtype=np.intaa ) if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) UpperCAmelCase__ : Any = self.normalize( padded_inputs["""input_features"""] ,attention_mask=A ) if return_tensors is not None: UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A ) return padded_inputs
65
1
"""simple docstring""" import os def lowerCAmelCase ( ): '''simple docstring''' with open(os.path.dirname(__UpperCamelCase ) + """/grid.txt""" ) as f: UpperCAmelCase__ : List[Any] = [] # noqa: E741 for _ in range(20 ): l.append([int(__UpperCamelCase ) for x in f.readline().split()] ) UpperCAmelCase__ : Dict = 0 # right for i in range(20 ): for j in range(17 ): UpperCAmelCase__ : Any = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: UpperCAmelCase__ : Optional[Any] = temp # down for i in range(17 ): for j in range(20 ): UpperCAmelCase__ : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: UpperCAmelCase__ : Tuple = temp # diagonal 1 for i in range(17 ): for j in range(17 ): UpperCAmelCase__ : Union[str, Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: UpperCAmelCase__ : List[Any] = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): UpperCAmelCase__ : List[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: UpperCAmelCase__ : Union[str, Any] = temp return maximum if __name__ == "__main__": print(solution())
65
"""simple docstring""" from math import factorial def lowerCAmelCase ( __UpperCamelCase = 100 ): '''simple docstring''' return sum(int(__UpperCamelCase ) for x in str(factorial(__UpperCamelCase ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
65
1
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCAmelCase__ : List[str] = np.full((len(__UpperCamelCase ), sequence_length, 2) , __UpperCamelCase ) else: UpperCAmelCase__ : Any = np.full((len(__UpperCamelCase ), sequence_length) , __UpperCamelCase ) for i, tensor in enumerate(__UpperCamelCase ): if padding_side == "right": if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCAmelCase__ : str = tensor[:sequence_length] else: UpperCAmelCase__ : str = tensor[:sequence_length] else: if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length] else: UpperCAmelCase__ : Union[str, Any] = tensor[:sequence_length] return out_tensor.tolist() def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : str = ord(__UpperCamelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True UpperCAmelCase__ : Optional[Any] = unicodedata.category(__UpperCamelCase ) if cat.startswith("""P""" ): return True return False @dataclass class __lowercase ( __lowerCamelCase ): snake_case_ = 42 snake_case_ = True snake_case_ = None snake_case_ = None snake_case_ = -1_0_0 snake_case_ = "pt" def __lowercase ( self : Dict ,A : List[str] ): '''simple docstring''' import torch UpperCAmelCase__ : Dict = """label""" if """label""" in features[0].keys() else """labels""" UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None UpperCAmelCase__ : Any = 
self.tokenizer.pad( A ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" if labels is None else None ,) if labels is None: return batch UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch["""entity_ids"""] ).shape[1] UpperCAmelCase__ : Union[str, Any] = self.tokenizer.padding_side if padding_side == "right": UpperCAmelCase__ : Optional[Any] = [ list(A ) + [self.label_pad_token_id] * (sequence_length - len(A )) for label in labels ] else: UpperCAmelCase__ : Tuple = [ [self.label_pad_token_id] * (sequence_length - len(A )) + list(A ) for label in labels ] UpperCAmelCase__ : str = [feature["""ner_tags"""] for feature in features] UpperCAmelCase__ : Any = padding_tensor(A ,-1 ,A ,A ) UpperCAmelCase__ : List[str] = [feature["""original_entity_spans"""] for feature in features] UpperCAmelCase__ : Optional[int] = padding_tensor(A ,(-1, -1) ,A ,A ) UpperCAmelCase__ : Optional[int] = {k: torch.tensor(A ,dtype=torch.intaa ) for k, v in batch.items()} return batch
65
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class __lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] ,A : Optional[int] ,A : int=13 ,A : Tuple=7 ,A : Dict=True ,A : Optional[int]=True ,A : Tuple=True ,A : str=True ,A : Any=99 ,A : Tuple=32 ,A : Dict=5 ,A : Optional[int]=4 ,A : Dict=37 ,A : Any="gelu" ,A : Any=0.1 ,A : Optional[int]=0.1 ,A : Union[str, Any]=512 ,A : Any=16 ,A : List[str]=2 ,A : List[Any]=0.0_2 ,A : Optional[int]=4 ,): '''simple docstring''' UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : Any = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : Optional[int] = is_training UpperCAmelCase__ : Optional[Any] = use_attention_mask UpperCAmelCase__ : int = use_token_type_ids UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Any = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Dict = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : Any = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : List[Any] = type_vocab_size UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = num_choices def __lowercase ( self : Optional[Any] ): '''simple docstring''' 
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase__ : List[str] = None if self.use_attention_mask: UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : int = DistilBertConfig( vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=A ,) return config, input_ids, attention_mask def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs UpperCAmelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = FlaxDistilBertModelTester(self ) @slow def __lowercase ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase__ : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(A ) @require_flax class __lowercase ( unittest.TestCase ): @slow def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 
FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase__ : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) UpperCAmelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCAmelCase__ : Dict = model(A ,attention_mask=A )[0] UpperCAmelCase__ : List[Any] = (1, 11, 768) self.assertEqual(output.shape ,A ) UpperCAmelCase__ : Any = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,A ,atol=1e-4 ) )
65
1
"""simple docstring""" import math def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = 2 UpperCAmelCase__ : Dict = int(math.sqrt(__UpperCamelCase ) ) # Size of every segment UpperCAmelCase__ : List[str] = [True] * (end + 1) UpperCAmelCase__ : Tuple = [] while start <= end: if temp[start] is True: in_prime.append(__UpperCamelCase ) for i in range(start * start , end + 1 , __UpperCamelCase ): UpperCAmelCase__ : Optional[int] = False start += 1 prime += in_prime UpperCAmelCase__ : int = end + 1 UpperCAmelCase__ : str = min(2 * end , __UpperCamelCase ) while low <= n: UpperCAmelCase__ : int = [True] * (high - low + 1) for each in in_prime: UpperCAmelCase__ : Union[str, Any] = math.floor(low / each ) * each if t < low: t += each for j in range(__UpperCamelCase , high + 1 , __UpperCamelCase ): UpperCAmelCase__ : int = False for j in range(len(__UpperCamelCase ) ): if temp[j] is True: prime.append(j + low ) UpperCAmelCase__ : str = high + 1 UpperCAmelCase__ : Dict = min(high + end , __UpperCamelCase ) return prime print(sieve(10**6))
65
"""simple docstring""" __UpperCAmelCase = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image', 'mask_image']) __UpperCAmelCase = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset(['input_tokens']) __UpperCAmelCase = frozenset(['input_tokens'])
65
1
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase = 16 __UpperCAmelCase = 32 def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = "bert-base-cased" ): '''simple docstring''' UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase ) UpperCAmelCase__ : Dict = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase__ : Dict = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__UpperCamelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ : Tuple = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
UpperCAmelCase__ : Dict = DataLoader( tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase ) UpperCAmelCase__ : Union[str, Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase ) return train_dataloader, eval_dataloader def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Tuple = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ : Union[str, Any] = config["""lr"""] UpperCAmelCase__ : List[Any] = int(config["""num_epochs"""] ) UpperCAmelCase__ : Dict = int(config["""seed"""] ) UpperCAmelCase__ : str = int(config["""batch_size"""] ) UpperCAmelCase__ : str = args.model_name_or_path set_seed(__UpperCamelCase ) UpperCAmelCase__ , UpperCAmelCase__ : str = get_dataloaders(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase ) # Instantiate optimizer UpperCAmelCase__ : Tuple = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase__ : Tuple = optimizer_cls(params=model.parameters() , lr=__UpperCamelCase ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase__ : int = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Tuple = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase__ : 
Dict = get_linear_schedule_with_warmup( optimizer=__UpperCamelCase , num_warmup_steps=0 , num_training_steps=__UpperCamelCase , ) else: UpperCAmelCase__ : str = DummyScheduler(__UpperCamelCase , total_num_steps=__UpperCamelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase__ : Optional[int] = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase__ : Optional[Any] = 0 # Now we train the model UpperCAmelCase__ : str = evaluate.load("""glue""" , """mrpc""" ) UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : int = {} for epoch in range(__UpperCamelCase , __UpperCamelCase ): model.train() for step, batch in enumerate(__UpperCamelCase ): UpperCAmelCase__ : List[Any] = model(**__UpperCamelCase ) UpperCAmelCase__ : List[str] = outputs.loss UpperCAmelCase__ : Optional[Any] = loss / gradient_accumulation_steps accelerator.backward(__UpperCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() UpperCAmelCase__ : Dict = 0 for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : int = model(**__UpperCamelCase ) UpperCAmelCase__ : Optional[int] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__UpperCamelCase ) - 1: UpperCAmelCase__ : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase__ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__UpperCamelCase , references=__UpperCamelCase , ) UpperCAmelCase__ : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , __UpperCamelCase ) UpperCAmelCase__ : str = eval_metric["""accuracy"""] if best_performance < eval_metric["accuracy"]: UpperCAmelCase__ : List[Any] = eval_metric["""accuracy"""] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=__UpperCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__UpperCamelCase , ) parser.add_argument( """--output_dir""" , 
type=__UpperCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--performance_lower_bound""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , ) parser.add_argument( """--num_epochs""" , type=__UpperCamelCase , default=3 , help="""Number of train epochs.""" , ) UpperCAmelCase__ : List[Any] = parser.parse_args() UpperCAmelCase__ : Optional[int] = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": main()
65
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __lowercase ( unittest.TestCase ): def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split() UpperCAmelCase__ : Tuple = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[Any] = { """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>""", } UpperCAmelCase__ : int = { """feature_size""": 1, """padding_value""": 0.0, """sampling_rate""": 16_000, """return_attention_mask""": False, """do_normalize""": True, } UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp() UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,A ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with 
open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) # load decoder from hub UpperCAmelCase__ : int = """hf-internal-testing/ngram-beam-search-decoder""" def __lowercase ( self : str ,**A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy() kwargs.update(A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : List[str] ,**A : Dict ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Any ,**A : List[Any] ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**A ) def __lowercase ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,A ) # decoder self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,) self.assertIsInstance(processor.decoder ,A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() 
,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha ,5.0 ) self.assertEqual(processor.language_model.beta ,3.0 ) self.assertEqual(processor.language_model.score_boundary ,-7.0 ) self.assertEqual(processor.language_model.unk_score_offset ,3 ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["""xx"""] ) with self.assertRaisesRegex(A ,"""include""" ): WavaVecaProcessorWithLM( tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) def __lowercase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : str = floats_list((3, 1_000) ) UpperCAmelCase__ : Optional[Any] = feature_extractor(A ,return_tensors="""np""" ) UpperCAmelCase__ : List[Any] = processor(A ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[int] = self.get_decoder() UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : List[Any] = """This is a test string""" UpperCAmelCase__ : int = 
processor(text=A ) UpperCAmelCase__ : Dict = tokenizer(A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def __lowercase ( self : Tuple ,A : List[Any]=(2, 10, 16) ,A : Dict=77 ): '''simple docstring''' np.random.seed(A ) return np.random.rand(*A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 ) UpperCAmelCase__ : Tuple = processor.decode(A ) UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(A )[0] self.assertEqual(decoded_decoder[0] ,decoded_processor.text ) self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text ) self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score ) @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] ) def __lowercase ( self : List[str] ,A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: UpperCAmelCase__ : List[str] = processor.batch_decode(A ) else: with get_context(A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(A ,A ) UpperCAmelCase__ : Optional[Any] = list(A ) with get_context("""fork""" ).Pool() as p: UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(A ,decoded_processor.text ) self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text ) self.assertListEqual(A ,decoded_processor.logit_score ) self.assertListEqual(A ,decoded_processor.lm_score ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Any = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : List[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,) UpperCAmelCase__ : List[str] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(A ) with get_context("""fork""" ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,) UpperCAmelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(A ,A ) self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,A ) self.assertTrue(np.array_equal(A 
,decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,A ,atol=1e-3 ) ) self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,A ,atol=1e-3 ) ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : Tuple = 2.0 UpperCAmelCase__ : str = 5.0 UpperCAmelCase__ : Union[str, Any] = -2_0.0 UpperCAmelCase__ : Optional[Any] = True UpperCAmelCase__ : str = processor.batch_decode( A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,) UpperCAmelCase__ : Any = decoded_processor_out.text UpperCAmelCase__ : Union[str, Any] = list(A ) decoder.reset_params( alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,) with get_context("""fork""" ).Pool() as pool: UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch( A ,A ,) UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(A ,A ) self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,A ) UpperCAmelCase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha ,2.0 ) self.assertEqual(lm_model.beta ,5.0 ) self.assertEqual(lm_model.unk_score_offset ,-2_0.0 ) self.assertEqual(lm_model.score_boundary ,A ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : str = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) 
).parent.parent.absolute() UpperCAmelCase__ : Optional[int] = os.listdir(A ) UpperCAmelCase__ : List[Any] = ["""alphabet.json""", """language_model"""] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(A ,A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(A ) UpperCAmelCase__ : Tuple = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() UpperCAmelCase__ : Tuple = os.listdir(A ) UpperCAmelCase__ : Dict = os.listdir(A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(A ,A ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Dict = floats_list((3, 1_000) ) UpperCAmelCase__ : List[str] = processor_wavaveca(A ,return_tensors="""np""" ) UpperCAmelCase__ : Dict = processor_auto(A ,return_tensors="""np""" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 ) UpperCAmelCase__ : List[str] = self._get_dummy_logits() UpperCAmelCase__ : Tuple = processor_wavaveca.batch_decode(A ) UpperCAmelCase__ : List[str] = processor_auto.batch_decode(A ) self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text ) def __lowercase ( self : List[str] ): 
'''simple docstring''' UpperCAmelCase__ : Dict = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A ) self.assertListEqual( processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,) @staticmethod def __lowercase ( A : Optional[Any] ,A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets] return retrieved_list def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : Dict = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(A ,output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) UpperCAmelCase__ : int = self._get_dummy_logits() UpperCAmelCase__ : Any = processor.batch_decode(A ,output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("""text""" in 
outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) self.assertListEqual( [""" """.join(self.get_from_offsets(A ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] ) @slow @require_torch @require_torchaudio def __lowercase ( self : Tuple ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=A ) UpperCAmelCase__ : Tuple = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : Tuple = iter(A ) UpperCAmelCase__ : Optional[int] = next(A ) UpperCAmelCase__ : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) UpperCAmelCase__ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : Tuple = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(A ).logits.cpu().numpy() UpperCAmelCase__ : Any = processor.decode(logits[0] ,output_word_offsets=A ) UpperCAmelCase__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Union[str, Any] = [ { """start_time""": d["""start_offset"""] * time_offset, """end_time""": d["""end_offset"""] * time_offset, """word""": d["""word"""], } for d in output["""word_offsets"""] ] UpperCAmelCase__ : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON 
THE RIVER AT THE WALL""" # output words self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,A ) self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,output.text ) # output times UpperCAmelCase__ : str = torch.tensor(self.get_from_offsets(A ,"""start_time""" ) ) UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(A ,"""end_time""" ) ) # fmt: off UpperCAmelCase__ : Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) ) self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
65
1
"""simple docstring""" import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = False, False, False @dataclass class __lowercase : snake_case_ = None snake_case_ = True snake_case_ = True snake_case_ = None # Automatically constructed snake_case_ = "dict" snake_case_ = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) snake_case_ = field(default="""Audio""" , init=__lowerCamelCase , repr=__lowerCamelCase ) def __call__( self : Tuple ): '''simple docstring''' return self.pa_type def __lowercase ( self : List[str] ,A : Union[str, bytes, dict] ): '''simple docstring''' try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. 
except ImportError as err: raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err if isinstance(A ,A ): return {"bytes": None, "path": value} elif isinstance(A ,A ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase__ : int = BytesIO() sf.write(A ,value["""array"""] ,value["""sampling_rate"""] ,format="""wav""" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("""pcm""" ): # "PCM" only has raw audio bytes if value.get("""sampling_rate""" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" ) if value.get("""bytes""" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) UpperCAmelCase__ : Optional[int] = np.frombuffer(value["""bytes"""] ,dtype=np.intaa ).astype(np.floataa ) / 32_767 else: UpperCAmelCase__ : str = np.memmap(value["""path"""] ,dtype="""h""" ,mode="""r""" ).astype(np.floataa ) / 32_767 UpperCAmelCase__ : List[Any] = BytesIO(bytes() ) sf.write(A ,A ,value["""sampling_rate"""] ,format="""wav""" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." 
) def __lowercase ( self : Optional[Any] ,A : dict ,A : Optional[Dict[str, Union[str, bool, None]]] = None ): '''simple docstring''' if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None) if path is None and file is None: raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err UpperCAmelCase__ : Optional[int] = xsplitext(A )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( """Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( """Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. 
""" ) if file is None: UpperCAmelCase__ : Union[str, Any] = token_per_repo_id or {} UpperCAmelCase__ : Union[str, Any] = path.split("""::""" )[-1] try: UpperCAmelCase__ : List[Any] = string_to_dict(A ,config.HUB_DATASETS_URL )["""repo_id"""] UpperCAmelCase__ : Optional[Any] = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase__ : str = None with xopen(A ,"""rb""" ,use_auth_token=A ) as f: UpperCAmelCase__ , UpperCAmelCase__ : Any = sf.read(A ) else: UpperCAmelCase__ , UpperCAmelCase__ : int = sf.read(A ) UpperCAmelCase__ : List[Any] = array.T if self.mono: UpperCAmelCase__ : List[str] = librosa.to_mono(A ) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase__ : Tuple = librosa.resample(A ,orig_sr=A ,target_sr=self.sampling_rate ) UpperCAmelCase__ : str = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __lowercase ( self : Tuple ): '''simple docstring''' from .features import Value if self.decode: raise ValueError("""Cannot flatten a decoded Audio feature.""" ) return { "bytes": Value("""binary""" ), "path": Value("""string""" ), } def __lowercase ( self : Optional[int] ,A : Union[pa.StringArray, pa.StructArray] ): '''simple docstring''' if pa.types.is_string(storage.type ): UpperCAmelCase__ : str = pa.array([None] * len(A ) ,type=pa.binary() ) UpperCAmelCase__ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,["""bytes""", """path"""] ,mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase__ : List[Any] = pa.array([None] * len(A ) ,type=pa.string() ) UpperCAmelCase__ : int = pa.StructArray.from_arrays([storage, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ): UpperCAmelCase__ : Dict = pa.array([Audio().encode_example(A ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if 
storage.type.get_field_index("""bytes""" ) >= 0: UpperCAmelCase__ : List[Any] = storage.field("""bytes""" ) else: UpperCAmelCase__ : Tuple = pa.array([None] * len(A ) ,type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: UpperCAmelCase__ : List[str] = storage.field("""path""" ) else: UpperCAmelCase__ : Union[str, Any] = pa.array([None] * len(A ) ,type=pa.string() ) UpperCAmelCase__ : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() ) return array_cast(A ,self.pa_type ) def __lowercase ( self : List[str] ,A : pa.StructArray ): '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(A : Tuple ): with xopen(A ,"""rb""" ) as f: UpperCAmelCase__ : Any = f.read() return bytes_ UpperCAmelCase__ : Dict = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] ,type=pa.binary() ,) UpperCAmelCase__ : str = pa.array( [os.path.basename(A ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] ,type=pa.string() ,) UpperCAmelCase__ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() ) return array_cast(A ,self.pa_type )
65
"""simple docstring""" from sklearn.metrics import fa_score import datasets __UpperCAmelCase = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' __UpperCAmelCase = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' __UpperCAmelCase = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def __lowercase ( self : List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,) def __lowercase ( self : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Optional[Any]=None ,A : List[str]=1 ,A : Optional[Any]="binary" ,A : Any=None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = fa_score( A ,A ,labels=A ,pos_label=A ,average=A ,sample_weight=A ) return {"f1": float(A ) if score.size == 1 else score}
65
1
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """is_longer"""] def __init__( self : str ,A : Union[str, Any]=64 ,A : Tuple=48_000 ,A : Dict=480 ,A : List[str]=10 ,A : str=1_024 ,A : Any=0.0 ,A : Optional[int]=False ,A : float = 0 ,A : float = 14_000 ,A : int = None ,A : str = "fusion" ,A : str = "repeatpad" ,**A : List[Any] ,): '''simple docstring''' super().__init__( feature_size=A ,sampling_rate=A ,padding_value=A ,return_attention_mask=A ,**A ,) UpperCAmelCase__ : List[Any] = top_db UpperCAmelCase__ : Union[str, Any] = truncation UpperCAmelCase__ : Optional[int] = padding UpperCAmelCase__ : List[Any] = fft_window_size UpperCAmelCase__ : Optional[Any] = (fft_window_size >> 1) + 1 UpperCAmelCase__ : Any = hop_length UpperCAmelCase__ : List[str] = max_length_s UpperCAmelCase__ : List[Any] = max_length_s * sampling_rate UpperCAmelCase__ : List[Any] = sampling_rate UpperCAmelCase__ : Optional[int] = frequency_min UpperCAmelCase__ : Tuple = frequency_max UpperCAmelCase__ : List[str] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm=A ,mel_scale="""htk""" ,) UpperCAmelCase__ : str = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm="""slaney""" ,mel_scale="""slaney""" ,) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Tuple = self.__class__.__name__ if "mel_filters" in 
output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __lowercase ( self : List[str] ,A : np.array ,A : Optional[np.array] = None ): '''simple docstring''' UpperCAmelCase__ : Dict = spectrogram( A ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=A ,log_mel="""dB""" ,) return log_mel_spectrogram.T def __lowercase ( self : Optional[Any] ,A : Union[str, Any] ,A : int ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : List[str] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : int = [0] # randomly choose index for each part UpperCAmelCase__ : Tuple = np.random.choice(ranges[0] ) UpperCAmelCase__ : Tuple = np.random.choice(ranges[1] ) UpperCAmelCase__ : str = np.random.choice(ranges[2] ) UpperCAmelCase__ : List[str] = mel[idx_front : idx_front + chunk_frames, :] UpperCAmelCase__ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] UpperCAmelCase__ : Dict = mel[idx_back : idx_back + chunk_frames, :] UpperCAmelCase__ : Optional[Any] = torch.tensor(mel[None, None, :] ) UpperCAmelCase__ : int = torch.nn.functional.interpolate( A ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=A ) UpperCAmelCase__ : Dict = mel_shrink[0][0].numpy() UpperCAmelCase__ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def __lowercase ( self : Any ,A : np.array ,A : Optional[int] ,A : Any ,A : Tuple ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": UpperCAmelCase__ : int = True # random crop to max_length (for compatibility) -> this should be handled by self.pad 
UpperCAmelCase__ : str = len(A ) - max_length UpperCAmelCase__ : Optional[Any] = np.random.randint(0 ,overflow + 1 ) UpperCAmelCase__ : Optional[int] = waveform[idx : idx + max_length] UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": UpperCAmelCase__ : Tuple = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCAmelCase__ : int = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCAmelCase__ : List[Any] = np.stack([mel, mel, mel, mel] ,axis=0 ) UpperCAmelCase__ : Any = False else: UpperCAmelCase__ : Union[str, Any] = self._random_mel_fusion(A ,A ,A ) UpperCAmelCase__ : List[str] = True else: raise NotImplementedError(f"data_truncating {truncation} not implemented" ) else: UpperCAmelCase__ : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCAmelCase__ : str = int(max_length / len(A ) ) UpperCAmelCase__ : int = np.stack(np.tile(A ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": UpperCAmelCase__ : List[Any] = int(max_length / len(A ) ) UpperCAmelCase__ : str = np.stack(np.tile(A ,A ) ) UpperCAmelCase__ : Optional[Any] = np.pad(A ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": UpperCAmelCase__ : int = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : str = None ,A : Optional[str] = None ,A : Optional[int] = None ,A : Optional[int] = None ,A : Optional[Union[str, TensorType]] = None ,**A : List[str] ,): '''simple docstring''' UpperCAmelCase__ : Optional[int] = truncation if truncation is not None else self.truncation UpperCAmelCase__ : Dict = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[int] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : List[str] = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Any = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [np.asarray(A )] # convert to mel spectrogram, truncate and pad if needed. UpperCAmelCase__ : Tuple = [ self._get_input_mel(A ,max_length if max_length else self.nb_max_samples ,A ,A ) for waveform in raw_speech ] UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Tuple = [] for mel, longer in padded_inputs: input_mel.append(A ) is_longer.append(A ) if truncation == "fusion" and sum(A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCAmelCase__ : List[str] = np.random.randint(0 ,len(A ) ) UpperCAmelCase__ : int = True if isinstance(input_mel[0] ,A ): UpperCAmelCase__ : Tuple = [np.asarray(A ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool UpperCAmelCase__ : List[str] = [[longer] for longer in is_longer] UpperCAmelCase__ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} UpperCAmelCase__ : str = BatchFeature(A ) if return_tensors is not None: UpperCAmelCase__ : int = input_features.convert_to_tensors(A ) return input_features
65
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model') __UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'} __UpperCAmelCase = '>>zh<<' __UpperCAmelCase = 'Helsinki-NLP/' if is_torch_available(): __UpperCAmelCase = 'pt' elif is_tf_available(): __UpperCAmelCase = 'tf' else: __UpperCAmelCase = 'jax' @require_sentencepiece class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = MarianTokenizer snake_case_ = False snake_case_ = True def __lowercase ( self : Optional[int] ): '''simple docstring''' super().setUp() UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase ( self : List[Any] ,**A : List[Any] ): '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Union[str, Any] ,A : Tuple ): '''simple docstring''' return ( 
"This is a test", "This is a test", ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = """</s>""" UpperCAmelCase__ : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""</s>""" ) self.assertEqual(vocab_keys[1] ,"""<unk>""" ) self.assertEqual(vocab_keys[-1] ,"""<pad>""" ) self.assertEqual(len(A ) ,9 ) def __lowercase ( self : Dict ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,9 ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" ) UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A ) self.assertIsInstance(A ,A ) UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0] self.assertListEqual(A ,batch.input_ids[0] ) UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(A ) UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )] self.assertIn("""source.spm""" ,A ) MarianTokenizer.from_pretrained(A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = tok( ["""I am a small frog""" * 1_000, """I am a small frog"""] ,padding=A ,truncation=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch.input_ids.shape ,(2, 512) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A ) self.assertIsInstance(A ,A ) self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) ) @slow def 
__lowercase ( self : Dict ): '''simple docstring''' # fmt: off UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) UpperCAmelCase__ : Any = """Tämä on testi""" UpperCAmelCase__ : int = """This is a test""" UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2] UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2] UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids self.assertListEqual(A ,A ) UpperCAmelCase__ : int = 
tokenizer.decode(A ,skip_special_tokens=A ) self.assertEqual(A ,A )
65
1
"""Pretokenize a dataset in parallel and push the result to the Hub.

Maps a tokenizer over the ``content`` column of the train split, keeping
the token ids and a characters-per-token ratio, then uploads the result.
"""
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one example's ``content`` field.

    Returns a dict with ``input_ids`` and ``ratio_char_token`` (characters
    per token — a rough compression measure, useful for filtering).
    Relies on the module-level ``tokenizer`` created below so that
    ``datasets.map`` workers can pickle the function cheaply.
    """
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
65
"""Dummy ONNX object: raises a clear error when the ``onnx`` backend is missing."""
from ..utils import DummyObject, requires_backends


class __lowercase(metaclass=DummyObject):
    # Backends that must be installed before the real object can be used.
    snake_case_ = ["onnx"]

    def __init__(self, *args, **kwargs):
        """Fail fast with an informative error unless ``onnx`` is installed."""
        requires_backends(self, ["onnx"])

    @classmethod
    def __lowercase(cls, *args, **kwargs):
        """Guarded classmethod hook; requires the ``onnx`` backend."""
        requires_backends(cls, ["onnx"])

    @classmethod
    def __lowercase(cls, *args, **kwargs):  # noqa: F811 — mirrors the real object's API surface
        """Guarded classmethod hook; requires the ``onnx`` backend."""
        requires_backends(cls, ["onnx"])
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : str = len(__UpperCamelCase ) UpperCAmelCase__ : str = [[0] * n for i in range(__UpperCamelCase )] for i in range(__UpperCamelCase ): UpperCAmelCase__ : int = y_points[i] for i in range(2 , __UpperCamelCase ): for j in range(__UpperCamelCase , __UpperCamelCase ): UpperCAmelCase__ : Optional[Any] = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
65
"""Command-line entry point for the ``diffusers-cli`` tool."""
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    """Parse the sub-command, build its service object and run it."""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No sub-command given: show usage and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
65
1
"""Validate Sri Lankan mobile phone numbers."""
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if *phone* is a valid Sri Lankan mobile number.

    Accepted shape: a "0", "94", "+94" or "0094" prefix, the mobile code
    7x (x in 0,1,2,4,5,6,7,8 — 3 and 9 are excluded by the pattern),
    an optional "-" or " " separator, and exactly seven digits.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # country/trunk prefix
        r"7(0|1|2|4|5|6|7|8)"     # mobile operator code
        r"(-| |)"                 # optional single separator
        r"\d{7}$"                 # subscriber number
    )
    return bool(pattern.search(phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
65
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __UpperCAmelCase = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __lowercase : snake_case_ = PegasusConfig snake_case_ = {} snake_case_ = """gelu""" def __init__( self : List[Any] ,A : int ,A : Optional[Any]=13 ,A : Dict=7 ,A : Dict=True ,A : Any=False ,A : Dict=99 ,A : int=32 ,A : Optional[int]=5 ,A : Union[str, Any]=4 ,A : Union[str, Any]=37 ,A : str=0.1 ,A : int=0.1 ,A : Optional[int]=20 ,A : Tuple=2 ,A : str=1 ,A : Optional[Any]=0 ,): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : int = is_training UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : Dict = hidden_size UpperCAmelCase__ : Optional[Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Any = intermediate_size UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = eos_token_id UpperCAmelCase__ : Union[str, Any] = pad_token_id UpperCAmelCase__ : List[str] = bos_token_id def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = 
ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size ) UpperCAmelCase__ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 ) UpperCAmelCase__ : Any = np.concatenate([input_ids, eos_tensor] ,axis=1 ) UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase__ : str = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) UpperCAmelCase__ : Optional[Any] = prepare_pegasus_inputs_dict(A ,A ,A ) return config, inputs_dict def __lowercase ( self : Any ,A : Optional[int] ,A : str ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = 20 UpperCAmelCase__ : Dict = model_class_name(A ) UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A ) UpperCAmelCase__ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" ) UpperCAmelCase__ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,) UpperCAmelCase__ : Optional[int] = model.decode( decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A 
,decoder_position_ids=A ,) UpperCAmelCase__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" ) UpperCAmelCase__ : int = model.decode( decoder_input_ids[:, -1:] ,A ,decoder_attention_mask=A ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=A ,) UpperCAmelCase__ : Dict = model.decode(A ,A ) UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" ) def __lowercase ( self : Optional[int] ,A : str ,A : Dict ,A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = 20 UpperCAmelCase__ : str = model_class_name(A ) UpperCAmelCase__ : Any = model.encode(inputs_dict["""input_ids"""] ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) UpperCAmelCase__ : Optional[int] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] ,axis=-1 ,) UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A ) UpperCAmelCase__ : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,) UpperCAmelCase__ : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,) UpperCAmelCase__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" ) UpperCAmelCase__ : Dict = model.decode( decoder_input_ids[:, -1:] ,A ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=A ,decoder_position_ids=A ,) UpperCAmelCase__ : Union[str, Any] = model.decode(A ,A ,decoder_attention_mask=A ) UpperCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) 
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ): '''simple docstring''' if attention_mask is None: UpperCAmelCase__ : Union[str, Any] = np.not_equal(__UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: UpperCAmelCase__ : Tuple = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () snake_case_ = True snake_case_ = False snake_case_ = False snake_case_ = False def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = FlaxPegasusModelTester(self ) UpperCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=A ) def __lowercase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(A ,A ,A ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(A ,A ,A ) def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : 
Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A ) UpperCAmelCase__ : int = model_class(A ) @jax.jit def encode_jitted(A : Optional[int] ,A : Union[str, Any]=None ,**A : Optional[Any] ): return model.encode(input_ids=A ,attention_mask=A ) with self.subTest("""JIT Enabled""" ): UpperCAmelCase__ : int = encode_jitted(**A ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCAmelCase__ : Dict = encode_jitted(**A ).to_tuple() self.assertEqual(len(A ) ,len(A ) ) for jitted_output, output in zip(A ,A ): self.assertEqual(jitted_output.shape ,output.shape ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : Dict = model_class(A ) UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] ) UpperCAmelCase__ : Dict = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(A : List[Any] ,A : Any ,A : List[Any] ): return model.decode( decoder_input_ids=A ,decoder_attention_mask=A ,encoder_outputs=A ,) with self.subTest("""JIT Enabled""" ): UpperCAmelCase__ : Tuple = decode_jitted(**A ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCAmelCase__ : str = decode_jitted(**A ).to_tuple() self.assertEqual(len(A ) ,len(A ) ) for jitted_output, output in zip(A ,A ): self.assertEqual(jitted_output.shape ,output.shape ) @slow def __lowercase ( self : List[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : List[str] = 
model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=A ) UpperCAmelCase__ : Any = np.ones((1, 1) ) UpperCAmelCase__ : Optional[Any] = model(A ) self.assertIsNotNone(A ) @slow def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) UpperCAmelCase__ : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) UpperCAmelCase__ : Union[str, Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] UpperCAmelCase__ : str = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] UpperCAmelCase__ : str = tokenizer(A ,return_tensors="""np""" ,truncation=A ,max_length=512 ,padding=A ) UpperCAmelCase__ : Union[str, Any] = model.generate(**A ,num_beams=2 ).sequences UpperCAmelCase__ : int = tokenizer.batch_decode(A ,skip_special_tokens=A ) assert tgt_text == decoded
65
1
"""Count "hybrid" integers p**q * q**p not exceeding base**degree."""
from math import isqrt, log2


def calculate_prime_numbers(max_number):
    """Return all primes strictly below *max_number* (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base=800800, degree=800800):
    """Count pairs of distinct primes p < q with p**q * q**p <= base**degree.

    Works entirely in log space: the condition is equivalent to
    ``q*log2(p) + p*log2(q) <= degree*log2(base)``, and since ``2*p < p**q``
    for any valid pair, every candidate prime is below that bound. A
    two-pointer sweep over the sorted primes counts the qualifying pairs.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink the right end until the (left, right) pair qualifies;
        # every prime between them then also pairs with prime_numbers[left].
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
65
"""Count set bits in the binary representation of a non-negative integer."""


def lowerCAmelCase(number):
    """Return how many ``1`` bits appear in the binary form of *number*.

    Args:
        number: a non-negative integer.

    Raises:
        ValueError: if *number* is negative or not an ``int``.
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    # bin() renders the number as "0b...", which contains exactly one '1'
    # per set bit — counting them gives the population count directly.
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
65
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __lowercase : snake_case_ = BlenderbotSmallConfig snake_case_ = {} snake_case_ = """gelu""" def __init__( self : Optional[int] ,A : List[str] ,A : Dict=13 ,A : List[str]=7 ,A : List[str]=True ,A : Dict=False ,A : str=99 ,A : Any=32 ,A : Union[str, Any]=2 ,A : List[Any]=4 ,A : Optional[Any]=37 ,A : Dict=0.1 ,A : List[Any]=0.1 ,A : Tuple=20 ,A : int=2 ,A : Union[str, Any]=1 ,A : List[Any]=0 ,): '''simple docstring''' UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : List[Any] = is_training UpperCAmelCase__ : Optional[int] = use_labels UpperCAmelCase__ : Tuple = vocab_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Optional[int] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : List[str] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = max_position_embeddings UpperCAmelCase__ : Tuple = eos_token_id UpperCAmelCase__ : Optional[Any] = pad_token_id UpperCAmelCase__ : Any = bos_token_id def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) UpperCAmelCase__ : int = 
tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) UpperCAmelCase__ : List[str] = tf.concat([input_ids, eos_tensor] ,axis=1 ) UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase__ : Optional[int] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) UpperCAmelCase__ : Union[str, Any] = prepare_blenderbot_small_inputs_dict(A ,A ,A ) return config, inputs_dict def __lowercase ( self : int ,A : int ,A : str ): '''simple docstring''' UpperCAmelCase__ : int = TFBlenderbotSmallModel(config=A ).get_decoder() UpperCAmelCase__ : int = inputs_dict["""input_ids"""] UpperCAmelCase__ : Tuple = input_ids[:1, :] UpperCAmelCase__ : int = inputs_dict["""attention_mask"""][:1, :] UpperCAmelCase__ : List[str] = inputs_dict["""head_mask"""] UpperCAmelCase__ : int = 1 # first forward pass UpperCAmelCase__ : List[str] = model(A ,attention_mask=A ,head_mask=A ,use_cache=A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase__ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) UpperCAmelCase__ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and UpperCAmelCase__ : List[str] = tf.concat([input_ids, next_tokens] ,axis=-1 ) UpperCAmelCase__ : Optional[int] = tf.concat([attention_mask, next_attn_mask] 
,axis=-1 ) UpperCAmelCase__ : Dict = model(A ,attention_mask=A )[0] UpperCAmelCase__ : Dict = model(A ,attention_mask=A ,past_key_values=A )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice UpperCAmelCase__ : Optional[int] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) UpperCAmelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A ,A ,rtol=1e-3 ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ): '''simple docstring''' if attention_mask is None: UpperCAmelCase__ : Optional[int] = tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase__ : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase__ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): snake_case_ = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if 
is_tf_available() else () ) snake_case_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () snake_case_ = ( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) snake_case_ = True snake_case_ = False snake_case_ = False def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = TFBlenderbotSmallModelTester(self ) UpperCAmelCase__ : Tuple = ConfigTester(self ,config_class=A ) def __lowercase ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A ) @require_tokenizers @require_tf class __lowercase ( unittest.TestCase ): snake_case_ = [ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] snake_case_ = """facebook/blenderbot_small-90M""" @cached_property def __lowercase ( self : Tuple ): '''simple docstring''' # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) @cached_property def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text ,return_tensors="""tf""" ) UpperCAmelCase__ : List[Any] = self.model.generate( model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 ,use_cache=A ,) UpperCAmelCase__ : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=A )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
65
"""Look up book information on the Open Library API (https://openlibrary.org)."""

from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return the JSON record for an Open Library id such as ``isbn/0140328726``.

    Raises:
        ValueError: if ``olid`` is not of the form ``<kind>/<identifier>``.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    # A timeout keeps the interactive loop below from hanging on a dead connection.
    return requests.get(f"https://openlibrary.org/{new_olid}.json", timeout=10).json()


def summarize_book(ol_book_data: dict) -> dict:
    """Map an Open Library book record onto human-readable keys.

    Author entries only carry an Open Library key, so each one costs an extra
    API round-trip to resolve the author's name.
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [get_openlibrary_data(author["key"])["name"] for author in data["Authors"]]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
65
1
"""Tests for loading plain-text files through ``TextDatasetReader``."""

import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Assert the fixture file produced 4 rows, one ``text`` column, expected dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    # Parenthesized on purpose: without parentheses the conditional-expression
    # precedence makes the assertion vacuously true when split is None.
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert every requested split of ``dataset_dict`` matches the fixture file."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
65
"""Utility helpers for the RAG example scripts: dataset handling, metrics, misc I/O."""

import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating to ``max_length``."""
    # BART's byte-level BPE needs add_prefix_space when the line doesn't start with one.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that are entirely padding (and trim ``attention_mask`` to match)."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    """Line-aligned seq2seq dataset backed by ``<type_path>.source``/``<type_path>.target`` files."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in ``data_file``."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        """Stack examples and trim shared padding columns off source and target."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    """Flatten one level of nesting."""
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    """Describe the current git checkout (repo, sha, branch, host)."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and reference."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    """Mean exact-match over paired output/reference lines."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    """Move hparams that the model config understands onto the config, dropping the rest."""
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
65
1
"""Byte-level BPE tokenizer for Longformer (GPT-2 style vocabulary + merges)."""

import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte value (0-255) to a printable unicode character.

    Printable bytes map to themselves; the rest are shifted into a private
    range so that BPE merges never see whitespace/control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer; treats spaces as parts of tokens (GPT-2 style)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized token; results are memoized."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Lowest-rank pair merges first; unseen pairs rank as +inf.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Longformer does not use token type ids; the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
65
"""Tests for the Kandinsky 2.2 controlnet pipeline.

Fixes over the dumped version: the two test classes no longer share one name
(the second used to shadow the first, hiding all fast tests), each property
has its own name matching the attribute reads (`self.block_out_channels_a`,
`self.cross_attention_dim`, ...), locals are bound to the names that are
actually read (`model`, `unet`, `pipe`, ...), and `torch.floataa` — not a
torch attribute — is restored to `torch.float16`.
"""

import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyVaaControlnetPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """CPU smoke tests with tiny randomly initialised components."""

    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny UNet; seeded so weights are deterministic."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble unet + scheduler + movq for the pipeline under test."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; negative embeds use seed+1 so they differ."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    """GPU integration test against the published checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
65
1
"""Data2VecAudio model configuration.

Fix over the dumped version: every constructor argument was stored in a
throwaway local instead of ``self``, so the config object carried no
attributes and ``len(self.conv_dim)`` (and the stride validation below)
raised ``AttributeError``. Values are now stored on the instance.
"""

import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    """Configuration class for a Data2VecAudio model.

    All arguments mirror the usual wav2vec2-family knobs: feature-encoder
    convolution stack (``conv_dim``/``conv_stride``/``conv_kernel``),
    transformer size, SpecAugment masking, CTC loss, adapter, and the
    classifier/x-vector heads.
    """

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        # Copy to lists so a tuple default never leaks as a shared mutable.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Overall stride of the feature encoder (input samples per output frame)."""
        return math.prod(self.conv_stride)
65
"""VisionEncoderDecoder configuration plus its ONNX export configs.

Fixes over the dumped version: the four classes all shared the name
``__lowercase`` (each shadowing the previous), while
``VisionEncoderDecoderEncoderOnnxConfig`` was referenced by name and
therefore undefined; locals (``logger``, ``encoder_config``,
``dummy_input``, ``common_inputs`` ...) were assigned to throwaway
placeholders but read under their real names.
"""

import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    """Composite configuration wrapping an ``encoder`` and ``decoder`` sub-config."""

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate from two standalone configs, marking the decoder as such."""
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        # Local import: torch is only needed when actually exporting.
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        # The decoder needs to know the encoder hidden size for the cross-attention.
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
65
1
"""Convert a native T5X checkpoint into a PyTorch checkpoint.

Fixes over the dumped version: every function was named ``lowerCAmelCase``
with duplicate parameter names (a SyntaxError) while the call sites used the
real names (``tax_attention_lookup`` etc.); converted tensors were assigned
to a throwaway local instead of the HF state-dict keys; and the argparse
attribute (``args.tax_checkpoint_path``) did not match the declared flag.
"""

import argparse
import collections

import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) attention kernels of block ``i`` (untransposed)."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the MLP kernels (wi, wo) of block ``i``; ``wi`` is a pair for gated GeLU."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of block ``i``."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Map the flattened T5X parameter tree onto HF T5 state-dict names."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Wrap converted numpy arrays into torch tensors and tie missing weights."""
    state_dict = collections.OrderedDict(
        [(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]
    )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load the T5X checkpoint and copy its converted weights into ``model``."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Build the HF model described by ``config_file``, load the T5X weights, save it."""
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
65
"""simple docstring""" import requests def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Tuple = {"""Content-Type""": """application/json"""} UpperCAmelCase__ : Optional[Any] = requests.post(__UpperCamelCase , json={"""text""": message_body} , headers=__UpperCamelCase ) if response.status_code != 200: UpperCAmelCase__ : Any = ( """Request to slack returned an error """ F"{response.status_code}, the response is:\n{response.text}" ) raise ValueError(__UpperCamelCase ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
65
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) UpperCAmelCase__ : List[str] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b" UpperCAmelCase__ : Dict = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b" UpperCAmelCase__ : Any = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
65
"""Tests for the CTRL tokenizer.

Fixes over the dumped version: the mixin-contract names
(``tokenizer_class``, ``setUp``, ``get_tokenizer``,
``get_input_output_texts``, ``test_full_tokenizer``) were obfuscated to
colliding placeholders (later methods shadowed earlier ones), and locals
(``vocab_tokens``, ``merges``, ``tokenizer`` ...) were read without ever
being bound.
"""

import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
65
1
"""ElGamal key-pair generator.

Fixes over the dumped version: all four functions were named
``lowerCAmelCase`` while the call sites used the real names
(``primitive_root``, ``generate_key``, ``make_key_files``, ``main``),
``make_key_files`` had duplicate parameter names (a SyntaxError), and
``name``/``key_size``/``public_key``/``private_key`` were read without
being bound.
"""

import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller


# Smallest candidate used when sampling random values below.
min_primitive_root = 3


def primitive_root(p_val):
    """Return a (probable) primitive root modulo the prime ``p_val``."""
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size):
    """Generate an ElGamal (public_key, private_key) pair of ``key_size`` bits."""
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name, key_size):
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if either exists."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main():
    """Entry point: generate a 2048-bit key pair named 'elgamal'."""
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
65
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { 'configuration_bridgetower': [ 'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig', ], 'processing_bridgetower': ['BridgeTowerProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST', 'BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
65
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'caidas/swin2sr-classicalsr-x2-64': ( 'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json' ), } class __lowercase ( __lowerCamelCase ): snake_case_ = """swin2sr""" snake_case_ = { """hidden_size""": """embed_dim""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] ,A : Tuple=64 ,A : Tuple=1 ,A : List[Any]=3 ,A : Dict=180 ,A : int=[6, 6, 6, 6, 6, 6] ,A : Optional[Any]=[6, 6, 6, 6, 6, 6] ,A : Any=8 ,A : Optional[int]=2.0 ,A : Tuple=True ,A : int=0.0 ,A : Dict=0.0 ,A : int=0.1 ,A : List[Any]="gelu" ,A : Optional[Any]=False ,A : Union[str, Any]=0.0_2 ,A : str=1e-5 ,A : Optional[int]=2 ,A : int=1.0 ,A : Any="1conv" ,A : Tuple="pixelshuffle" ,**A : Optional[int] ,): '''simple docstring''' super().__init__(**A ) UpperCAmelCase__ : List[Any] = image_size UpperCAmelCase__ : List[str] = patch_size UpperCAmelCase__ : Dict = num_channels UpperCAmelCase__ : str = embed_dim UpperCAmelCase__ : List[Any] = depths UpperCAmelCase__ : Union[str, Any] = len(A ) UpperCAmelCase__ : str = num_heads UpperCAmelCase__ : int = window_size UpperCAmelCase__ : int = mlp_ratio UpperCAmelCase__ : Any = qkv_bias UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[Any] = drop_path_rate UpperCAmelCase__ : Optional[int] = hidden_act UpperCAmelCase__ : List[Any] = use_absolute_embeddings UpperCAmelCase__ : Union[str, Any] = layer_norm_eps UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : Dict = upscale UpperCAmelCase__ : str = img_range UpperCAmelCase__ : int = resi_connection UpperCAmelCase__ : str = upsampler
65
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """is_longer"""] def __init__( self : str ,A : Union[str, Any]=64 ,A : Tuple=48_000 ,A : Dict=480 ,A : List[str]=10 ,A : str=1_024 ,A : Any=0.0 ,A : Optional[int]=False ,A : float = 0 ,A : float = 14_000 ,A : int = None ,A : str = "fusion" ,A : str = "repeatpad" ,**A : List[Any] ,): '''simple docstring''' super().__init__( feature_size=A ,sampling_rate=A ,padding_value=A ,return_attention_mask=A ,**A ,) UpperCAmelCase__ : List[Any] = top_db UpperCAmelCase__ : Union[str, Any] = truncation UpperCAmelCase__ : Optional[int] = padding UpperCAmelCase__ : List[Any] = fft_window_size UpperCAmelCase__ : Optional[Any] = (fft_window_size >> 1) + 1 UpperCAmelCase__ : Any = hop_length UpperCAmelCase__ : List[str] = max_length_s UpperCAmelCase__ : List[Any] = max_length_s * sampling_rate UpperCAmelCase__ : List[Any] = sampling_rate UpperCAmelCase__ : Optional[int] = frequency_min UpperCAmelCase__ : Tuple = frequency_max UpperCAmelCase__ : List[str] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm=A ,mel_scale="""htk""" ,) UpperCAmelCase__ : str = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=A ,min_frequency=A ,max_frequency=A ,sampling_rate=A ,norm="""slaney""" ,mel_scale="""slaney""" ,) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Tuple = self.__class__.__name__ if "mel_filters" in 
output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __lowercase ( self : List[str] ,A : np.array ,A : Optional[np.array] = None ): '''simple docstring''' UpperCAmelCase__ : Dict = spectrogram( A ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=A ,log_mel="""dB""" ,) return log_mel_spectrogram.T def __lowercase ( self : Optional[Any] ,A : Union[str, Any] ,A : int ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : List[str] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : int = [0] # randomly choose index for each part UpperCAmelCase__ : Tuple = np.random.choice(ranges[0] ) UpperCAmelCase__ : Tuple = np.random.choice(ranges[1] ) UpperCAmelCase__ : str = np.random.choice(ranges[2] ) UpperCAmelCase__ : List[str] = mel[idx_front : idx_front + chunk_frames, :] UpperCAmelCase__ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] UpperCAmelCase__ : Dict = mel[idx_back : idx_back + chunk_frames, :] UpperCAmelCase__ : Optional[Any] = torch.tensor(mel[None, None, :] ) UpperCAmelCase__ : int = torch.nn.functional.interpolate( A ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=A ) UpperCAmelCase__ : Dict = mel_shrink[0][0].numpy() UpperCAmelCase__ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def __lowercase ( self : Any ,A : np.array ,A : Optional[int] ,A : Any ,A : Tuple ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": UpperCAmelCase__ : int = True # random crop to max_length (for compatibility) -> this should be handled by self.pad 
UpperCAmelCase__ : str = len(A ) - max_length UpperCAmelCase__ : Optional[Any] = np.random.randint(0 ,overflow + 1 ) UpperCAmelCase__ : Optional[int] = waveform[idx : idx + max_length] UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": UpperCAmelCase__ : Tuple = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCAmelCase__ : int = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCAmelCase__ : List[Any] = np.stack([mel, mel, mel, mel] ,axis=0 ) UpperCAmelCase__ : Any = False else: UpperCAmelCase__ : Union[str, Any] = self._random_mel_fusion(A ,A ,A ) UpperCAmelCase__ : List[str] = True else: raise NotImplementedError(f"data_truncating {truncation} not implemented" ) else: UpperCAmelCase__ : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCAmelCase__ : str = int(max_length / len(A ) ) UpperCAmelCase__ : int = np.stack(np.tile(A ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": UpperCAmelCase__ : List[Any] = int(max_length / len(A ) ) UpperCAmelCase__ : str = np.stack(np.tile(A ,A ) ) UpperCAmelCase__ : Optional[Any] = np.pad(A ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": UpperCAmelCase__ : int = self._np_extract_fbank_features(A ,self.mel_filters ) UpperCAmelCase__ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : str ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : str = None ,A : Optional[str] = None ,A : Optional[int] = None ,A : Optional[int] = None ,A : Optional[Union[str, TensorType]] = None ,**A : List[str] ,): '''simple docstring''' UpperCAmelCase__ : Optional[int] = truncation if truncation is not None else self.truncation UpperCAmelCase__ : Dict = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[int] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : List[str] = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Any = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [np.asarray(A )] # convert to mel spectrogram, truncate and pad if needed. UpperCAmelCase__ : Tuple = [ self._get_input_mel(A ,max_length if max_length else self.nb_max_samples ,A ,A ) for waveform in raw_speech ] UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Tuple = [] for mel, longer in padded_inputs: input_mel.append(A ) is_longer.append(A ) if truncation == "fusion" and sum(A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCAmelCase__ : List[str] = np.random.randint(0 ,len(A ) ) UpperCAmelCase__ : int = True if isinstance(input_mel[0] ,A ): UpperCAmelCase__ : Tuple = [np.asarray(A ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool UpperCAmelCase__ : List[str] = [[longer] for longer in is_longer] UpperCAmelCase__ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} UpperCAmelCase__ : str = BatchFeature(A ) if return_tensors is not None: UpperCAmelCase__ : int = input_features.convert_to_tensors(A ) return input_features
65
1
"""simple docstring""" import random def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = [], [], [] for element in data: if element < pivot: less.append(__UpperCamelCase ) elif element > pivot: greater.append(__UpperCamelCase ) else: equal.append(__UpperCamelCase ) return less, equal, greater def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if index >= len(__UpperCamelCase ) or index < 0: return None UpperCAmelCase__ : List[Any] = items[random.randint(0 , len(__UpperCamelCase ) - 1 )] UpperCAmelCase__ : Optional[Any] = 0 UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = _partition(__UpperCamelCase , __UpperCamelCase ) UpperCAmelCase__ : Tuple = len(__UpperCamelCase ) UpperCAmelCase__ : Optional[Any] = len(__UpperCamelCase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(__UpperCamelCase , __UpperCamelCase ) # must be in larger else: return quick_select(__UpperCamelCase , index - (m + count) )
65
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] ,A : Union[str, Any] ,A : Dict=7 ,A : Optional[int]=3 ,A : List[str]=18 ,A : Union[str, Any]=30 ,A : Tuple=400 ,A : Dict=True ,A : List[str]=None ,A : str=True ,A : Optional[Any]=False ,A : Optional[Any]=True ,A : List[str]=True ,A : Optional[int]=[0.5, 0.5, 0.5] ,A : List[str]=[0.5, 0.5, 0.5] ,): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : Union[str, Any] = image_size UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : Optional[int] = max_resolution UpperCAmelCase__ : str = do_resize UpperCAmelCase__ : Tuple = size if size is not None else {"""height""": 18, """width""": 20} UpperCAmelCase__ : List[str] = do_thumbnail UpperCAmelCase__ : Optional[int] = do_align_axis UpperCAmelCase__ : Union[str, Any] = do_pad UpperCAmelCase__ : Tuple = do_normalize UpperCAmelCase__ : Optional[Any] = image_mean UpperCAmelCase__ : List[Any] = image_std def __lowercase ( self : Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = DonutImageProcessor if is_vision_available() else None def __lowercase ( self : str 
): '''simple docstring''' UpperCAmelCase__ : Tuple = DonutImageProcessingTester(self ) @property def __lowercase ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""do_resize""" ) ) self.assertTrue(hasattr(A ,"""size""" ) ) self.assertTrue(hasattr(A ,"""do_thumbnail""" ) ) self.assertTrue(hasattr(A ,"""do_align_long_axis""" ) ) self.assertTrue(hasattr(A ,"""do_pad""" ) ) self.assertTrue(hasattr(A ,"""do_normalize""" ) ) self.assertTrue(hasattr(A ,"""image_mean""" ) ) self.assertTrue(hasattr(A ,"""image_std""" ) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 20} ) UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} ) # Previous config had dimensions in (width, height) order UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=(42, 84) ) self.assertEqual(image_processor.size ,{"""height""": 84, """width""": 42} ) def __lowercase ( self : Dict ): '''simple docstring''' pass @is_flaky() def __lowercase ( self : int ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase__ : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( 
encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : Tuple = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) @is_flaky() def __lowercase ( self : List[str] ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : Optional[int] = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) @is_flaky() def __lowercase ( self : Any ): '''simple docstring''' # Initialize image_processing UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input UpperCAmelCase__ : List[Any] = 
image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched UpperCAmelCase__ : List[Any] = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,)
65
1
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __UpperCAmelCase = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$') @total_ordering @dataclass class __lowercase : snake_case_ = 42 snake_case_ = None snake_case_ = None snake_case_ = None snake_case_ = None def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = _str_to_version_tuple(self.version_str ) def __repr__( self : Tuple ): '''simple docstring''' return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}" @property def __lowercase ( self : List[str] ): '''simple docstring''' return self.major, self.minor, self.patch def __lowercase ( self : Any ,A : List[Any] ): '''simple docstring''' if isinstance(A ,A ): return Version(A ) elif isinstance(A ,A ): return other raise TypeError(f"{other} (type {type(A )}) cannot be compared to version." 
) def __eq__( self : Union[str, Any] ,A : List[str] ): '''simple docstring''' try: UpperCAmelCase__ : Optional[Any] = self._validate_operand(A ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : str ,A : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = self._validate_operand(A ) return self.tuple < other.tuple def __hash__( self : Union[str, Any] ): '''simple docstring''' return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def __lowercase ( cls : Any ,A : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def __lowercase ( self : List[Any] ): '''simple docstring''' return self.version_str def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = _VERSION_REG.match(__UpperCamelCase ) if not res: raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." ) return tuple(int(__UpperCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return ".".join(str(__UpperCamelCase ) for v in version_tuple )
65
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class __lowercase ( __lowerCamelCase ): snake_case_ = """open-llama""" def __init__( self : Dict ,A : str=100_000 ,A : str=4_096 ,A : Optional[Any]=11_008 ,A : Tuple=32 ,A : str=32 ,A : Optional[int]="silu" ,A : List[Any]=2_048 ,A : str=0.0_2 ,A : Optional[int]=1e-6 ,A : int=True ,A : Tuple=0 ,A : str=1 ,A : Any=2 ,A : Optional[Any]=False ,A : int=True ,A : Any=0.1 ,A : Optional[Any]=0.1 ,A : Optional[Any]=True ,A : Union[str, Any]=True ,A : Tuple=None ,**A : Optional[int] ,): '''simple docstring''' UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : Optional[int] = rms_norm_eps UpperCAmelCase__ : Any = use_cache UpperCAmelCase__ : Optional[Any] = kwargs.pop( """use_memorry_efficient_attention""" ,A ) UpperCAmelCase__ : Any = hidden_dropout_prob UpperCAmelCase__ : str = attention_dropout_prob UpperCAmelCase__ : Optional[int] = use_stable_embedding UpperCAmelCase__ : Tuple = shared_input_output_embedding UpperCAmelCase__ : Tuple = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,tie_word_embeddings=A ,**A ,) def __lowercase ( self : Optional[Any] ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,A ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f"got {self.rope_scaling}" ) 
UpperCAmelCase__ : List[Any] = self.rope_scaling.get("""type""" ,A ) UpperCAmelCase__ : int = self.rope_scaling.get("""factor""" ,A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(A ,A ) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
65
1
"""simple docstring""" import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __lowercase ( unittest.TestCase ): def __lowercase ( self : Any ): '''simple docstring''' # A mock response for an HTTP head request to emulate server down UpperCAmelCase__ : Optional[int] = mock.Mock() UpperCAmelCase__ : List[Any] = 500 UpperCAmelCase__ : int = {} UpperCAmelCase__ : List[str] = HTTPError UpperCAmelCase__ : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase__ : int = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=A ) as mock_head: UpperCAmelCase__ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def __lowercase ( self : int ): '''simple docstring''' # A mock response for an HTTP head request to emulate server down UpperCAmelCase__ : List[str] = mock.Mock() UpperCAmelCase__ : List[str] = 500 UpperCAmelCase__ : Optional[int] = {} UpperCAmelCase__ : Any = HTTPError UpperCAmelCase__ : Any = {} # Download this model to make sure it's in the cache. 
UpperCAmelCase__ : List[Any] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=A ) as mock_head: UpperCAmelCase__ : List[Any] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def __lowercase ( self : List[str] ): '''simple docstring''' # This test is for deprecated behavior and can be removed in v5 try: UpperCAmelCase__ : List[str] = tempfile.mktemp() with open(A ,"""wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,A ) UpperCAmelCase__ : Any = AlbertTokenizer.from_pretrained(A ) finally: os.remove(A ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("""tokenizer.json""" ,"""wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,A ) UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("""tokenizer.json""" ) def __lowercase ( self : List[str] ): '''simple docstring''' # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase__ : Tuple = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class __lowercase ( unittest.TestCase ): snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def __lowercase ( cls : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = TOKEN HfFolder.save_token(A ) @classmethod def __lowercase ( cls : List[Any] ): '''simple docstring''' try: delete_repo(token=cls._token ,repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def __lowercase ( self : int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : List[Any] = os.path.join(A ,"""vocab.txt""" ) with open(A ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase__ : List[Any] = BertTokenizer(A ) tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase__ : List[Any] = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(A ,repo_id="""test-tokenizer""" ,push_to_hub=A ,use_auth_token=self._token ) UpperCAmelCase__ : str = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def __lowercase ( self : List[str] ): '''simple docstring''' with 
tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Dict = os.path.join(A ,"""vocab.txt""" ) with open(A ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase__ : Union[str, Any] = BertTokenizer(A ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token ) UpperCAmelCase__ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( A ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=A ,use_auth_token=self._token ) UpperCAmelCase__ : List[str] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def __lowercase ( self : str ): '''simple docstring''' CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Optional[Any] = os.path.join(A ,"""vocab.txt""" ) with open(A ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase__ : Tuple = CustomTokenizer(A ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" ,trust_remote_code=A ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : List[str] = os.path.join(A ,"""vocab.txt""" ) 
with open(A ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase__ : Dict = BertTokenizerFast.from_pretrained(A ) bert_tokenizer.save_pretrained(A ) UpperCAmelCase__ : Dict = CustomTokenizerFast.from_pretrained(A ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" ,trust_remote_code=A ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained( f"{USER}/test-dynamic-tokenizer" ,use_fast=A ,trust_remote_code=A ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) class __lowercase ( unittest.TestCase ): def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Tuple = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] ) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = Trie() trie.add("""A""" ) 
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCAmelCase__ : Dict = Trie() UpperCAmelCase__ : Optional[int] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(A ,["""AB""", """C"""] )
65
"""simple docstring""" from collections.abc import Callable class __lowercase : def __init__( self : Tuple ,A : Callable | None = None ): '''simple docstring''' # Stores actual heap items. UpperCAmelCase__ : list = [] # Stores indexes of each item for supporting updates and deletion. UpperCAmelCase__ : dict = {} # Stores current size of heap. UpperCAmelCase__ : Any = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. UpperCAmelCase__ : int = key or (lambda A : x) def __lowercase ( self : Union[str, Any] ,A : int ): '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def __lowercase ( self : Tuple ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowercase ( self : Any ,A : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowercase ( self : List[Any] ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. 
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.arr[j], self.arr[i] def __lowercase ( self : Optional[int] ,A : int ,A : int ): '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def __lowercase ( self : Optional[int] ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._left(A ) UpperCAmelCase__ : Dict = self._right(A ) UpperCAmelCase__ : Optional[int] = i if left is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = left if right is not None and not self._cmp(A ,A ): UpperCAmelCase__ : List[Any] = right return valid_parent def __lowercase ( self : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : int = self._parent(A ) while parent is not None and not self._cmp(A ,A ): self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : int = parent, self._parent(A ) def __lowercase ( self : str ,A : int ): '''simple docstring''' UpperCAmelCase__ : Any = self._get_valid_parent(A ) while valid_parent != index: self._swap(A ,A ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = valid_parent, self._get_valid_parent(A ) def __lowercase ( self : Optional[Any] ,A : int ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Tuple = self.pos_map[item] UpperCAmelCase__ : Dict = [item, self.key(A )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : List[Any] ,A : int ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase__ : Any = self.pos_map[item] del self.pos_map[item] UpperCAmelCase__ : Dict = self.arr[self.size - 1] UpperCAmelCase__ : List[Any] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(A ) self._heapify_down(A ) def __lowercase ( self : str ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : Dict = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(A )] ) else: UpperCAmelCase__ : List[str] = [item, self.key(A )] UpperCAmelCase__ : Union[str, Any] = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowercase ( self : str ): '''simple docstring''' return self.arr[0] if self.size else None def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def lowerCAmelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
65
1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class __lowercase : snake_case_ = 42 snake_case_ = 42 class __lowercase : def __init__( self : Tuple ,A : int ): '''simple docstring''' UpperCAmelCase__ : list[list[Edge]] = [[] for _ in range(A )] UpperCAmelCase__ : List[Any] = size def __getitem__( self : Union[str, Any] ,A : int ): '''simple docstring''' return iter(self._graph[vertex] ) @property def __lowercase ( self : str ): '''simple docstring''' return self._size def __lowercase ( self : List[str] ,A : int ,A : int ,A : int ): '''simple docstring''' if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(A ,A ) ) def __lowercase ( self : Tuple ,A : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = deque([start_vertex] ) UpperCAmelCase__ : list[int | None] = [None] * self.size UpperCAmelCase__ : Dict = 0 while queue: UpperCAmelCase__ : int = queue.popleft() UpperCAmelCase__ : Any = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: UpperCAmelCase__ : Optional[Any] = current_distance + edge.weight UpperCAmelCase__ : Dict = distances[edge.destination_vertex] if ( isinstance(A ,A ) and new_distance >= dest_vertex_distance ): continue UpperCAmelCase__ : Union[str, Any] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
65
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """attention_mask"""] def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,): '''simple docstring''' super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A ) UpperCAmelCase__ : str = feature_size UpperCAmelCase__ : int = sampling_rate UpperCAmelCase__ : int = padding_value UpperCAmelCase__ : Dict = hop_length UpperCAmelCase__ : int = win_length UpperCAmelCase__ : Dict = frame_signal_scale UpperCAmelCase__ : Dict = preemphasis_coeff UpperCAmelCase__ : str = mel_floor UpperCAmelCase__ : Any = normalize_means UpperCAmelCase__ : str = normalize_vars UpperCAmelCase__ : int = win_function UpperCAmelCase__ : List[Any] = return_attention_mask UpperCAmelCase__ : str = win_length * sampling_rate // 1_000 UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000 UpperCAmelCase__ : int = optimal_fft_length(self.sample_size ) UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1 def __lowercase ( self : Union[str, Any] ,A : np.array ): '''simple docstring''' if self.win_function == "hamming_window": UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A ) else: UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ) UpperCAmelCase__ : Union[str, Any] = 
mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,) UpperCAmelCase__ : Optional[Any] = spectrogram( one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,) return msfc_features.T def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ): '''simple docstring''' # make sure we normalize float32 arrays if self.normalize_means: UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 ) UpperCAmelCase__ : Any = np.subtract(A ,A ) if self.normalize_vars: UpperCAmelCase__ : str = x[:input_length].std(axis=0 ) UpperCAmelCase__ : Optional[int] = np.divide(A ,A ) if input_length < x.shape[0]: UpperCAmelCase__ : int = padding_value # make sure array is in float32 UpperCAmelCase__ : str = x.astype(np.floataa ) return x def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ): '''simple docstring''' UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )] def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : Any = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [raw_speech] # extract fbank features UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech] # convert into correct format for padding UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} ) UpperCAmelCase__ : Optional[Any] = self.pad( A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,) # make sure list is in array format UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] ,A ): UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features] UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: 
UpperCAmelCase__ : Union[str, Any] = ( np.array(A ,dtype=np.intaa ) if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) UpperCAmelCase__ : Any = self.normalize( padded_inputs["""input_features"""] ,attention_mask=A ) if return_tensors is not None: UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A ) return padded_inputs
65
1
"""simple docstring""" import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __UpperCAmelCase = logging.getLogger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = """sequence-classification""" def __init__( self : str ,A : int ): '''simple docstring''' if type(A ) == dict: UpperCAmelCase__ : Tuple = Namespace(**A ) UpperCAmelCase__ : List[str] = glue_output_modes[hparams.task] UpperCAmelCase__ : Optional[int] = glue_tasks_num_labels[hparams.task] super().__init__(A ,A ,self.mode ) def __lowercase ( self : int ,**A : Dict ): '''simple docstring''' return self.model(**A ) def __lowercase ( self : Dict ,A : Any ,A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: UpperCAmelCase__ : Optional[int] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None UpperCAmelCase__ : Union[str, Any] = self(**A ) UpperCAmelCase__ : Dict = outputs[0] UpperCAmelCase__ : Union[str, Any] = self.trainer.lr_schedulers[0]["""scheduler"""] UpperCAmelCase__ : Optional[Any] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.hparams UpperCAmelCase__ : str = processors[args.task]() UpperCAmelCase__ : List[Any] = processor.get_labels() for mode in ["train", "dev"]: 
UpperCAmelCase__ : Union[str, Any] = self._feature_file(A ) if os.path.exists(A ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" ,A ) else: logger.info("""Creating features from dataset file at %s""" ,args.data_dir ) UpperCAmelCase__ : Optional[int] = ( processor.get_dev_examples(args.data_dir ) if mode == """dev""" else processor.get_train_examples(args.data_dir ) ) UpperCAmelCase__ : Tuple = convert_examples_to_features( A ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,) logger.info("""Saving features into cached file %s""" ,A ) torch.save(A ,A ) def __lowercase ( self : List[Any] ,A : str ,A : int ,A : bool = False ): '''simple docstring''' UpperCAmelCase__ : str = """dev""" if mode == """test""" else mode UpperCAmelCase__ : Dict = self._feature_file(A ) logger.info("""Loading features from cached file %s""" ,A ) UpperCAmelCase__ : Any = torch.load(A ) UpperCAmelCase__ : Dict = torch.tensor([f.input_ids for f in features] ,dtype=torch.long ) UpperCAmelCase__ : Dict = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long ) UpperCAmelCase__ : Union[str, Any] = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long ) if self.hparams.glue_output_mode == "classification": UpperCAmelCase__ : str = torch.tensor([f.label for f in features] ,dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": UpperCAmelCase__ : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.float ) return DataLoader( TensorDataset(A ,A ,A ,A ) ,batch_size=A ,shuffle=A ,) def __lowercase ( self : List[str] ,A : Union[str, Any] ,A : str ): '''simple docstring''' UpperCAmelCase__ : Dict = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: UpperCAmelCase__ : List[Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else 
None UpperCAmelCase__ : Dict = self(**A ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = outputs[:2] UpperCAmelCase__ : Optional[Any] = logits.detach().cpu().numpy() UpperCAmelCase__ : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __lowercase ( self : Dict ,A : Any ): '''simple docstring''' UpperCAmelCase__ : str = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() UpperCAmelCase__ : Optional[int] = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 ) if self.hparams.glue_output_mode == "classification": UpperCAmelCase__ : str = np.argmax(A ,axis=1 ) elif self.hparams.glue_output_mode == "regression": UpperCAmelCase__ : Dict = np.squeeze(A ) UpperCAmelCase__ : List[str] = np.concatenate([x["""target"""] for x in outputs] ,axis=0 ) UpperCAmelCase__ : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )] UpperCAmelCase__ : int = [[] for _ in range(out_label_ids.shape[0] )] UpperCAmelCase__ : List[Any] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,A ,A )} UpperCAmelCase__ : Any = dict(results.items() ) UpperCAmelCase__ : Optional[int] = results return ret, preds_list, out_label_list def __lowercase ( self : int ,A : list ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = self._eval_end(A ) UpperCAmelCase__ : str = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __lowercase ( self : Any ,A : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self._eval_end(A ) UpperCAmelCase__ : Union[str, Any] = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __lowercase ( A : List[str] ,A : Union[str, Any] ): '''simple 
docstring''' BaseTransformer.add_model_specific_args(A ,A ) parser.add_argument( """--max_seq_length""" ,default=128 ,type=A ,help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) ,) parser.add_argument( """--task""" ,default="""""" ,type=A ,required=A ,help="""The GLUE task to run""" ,) parser.add_argument( """--gpus""" ,default=0 ,type=A ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,) parser.add_argument( """--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" ) return parser def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : List[str] = argparse.ArgumentParser() add_generic_args(__UpperCamelCase , os.getcwd() ) UpperCAmelCase__ : Tuple = GLUETransformer.add_model_specific_args(__UpperCamelCase , os.getcwd() ) UpperCAmelCase__ : Optional[int] = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: UpperCAmelCase__ : Optional[int] = os.path.join( """./results""" , F"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) UpperCAmelCase__ : str = GLUETransformer(__UpperCamelCase ) UpperCAmelCase__ : Tuple = generic_train(__UpperCamelCase , __UpperCamelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: UpperCAmelCase__ : Tuple = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=__UpperCamelCase ) ) UpperCAmelCase__ : Optional[int] = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(__UpperCamelCase ) if __name__ == "__main__": main()
65
"""simple docstring""" from math import factorial def lowerCAmelCase ( __UpperCamelCase = 100 ): '''simple docstring''' return sum(int(__UpperCamelCase ) for x in str(factorial(__UpperCamelCase ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
65
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class __lowercase ( __lowerCamelCase ): snake_case_ = """open-llama""" def __init__( self : Dict ,A : str=100_000 ,A : str=4_096 ,A : Optional[Any]=11_008 ,A : Tuple=32 ,A : str=32 ,A : Optional[int]="silu" ,A : List[Any]=2_048 ,A : str=0.0_2 ,A : Optional[int]=1e-6 ,A : int=True ,A : Tuple=0 ,A : str=1 ,A : Any=2 ,A : Optional[Any]=False ,A : int=True ,A : Any=0.1 ,A : Optional[Any]=0.1 ,A : Optional[Any]=True ,A : Union[str, Any]=True ,A : Tuple=None ,**A : Optional[int] ,): '''simple docstring''' UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : Optional[int] = rms_norm_eps UpperCAmelCase__ : Any = use_cache UpperCAmelCase__ : Optional[Any] = kwargs.pop( """use_memorry_efficient_attention""" ,A ) UpperCAmelCase__ : Any = hidden_dropout_prob UpperCAmelCase__ : str = attention_dropout_prob UpperCAmelCase__ : Optional[int] = use_stable_embedding UpperCAmelCase__ : Tuple = shared_input_output_embedding UpperCAmelCase__ : Tuple = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,tie_word_embeddings=A ,**A ,) def __lowercase ( self : Optional[Any] ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,A ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f"got {self.rope_scaling}" ) 
UpperCAmelCase__ : List[Any] = self.rope_scaling.get("""type""" ,A ) UpperCAmelCase__ : int = self.rope_scaling.get("""factor""" ,A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(A ,A ) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
65
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class __lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] ,A : Optional[int] ,A : int=13 ,A : Tuple=7 ,A : Dict=True ,A : Optional[int]=True ,A : Tuple=True ,A : str=True ,A : Any=99 ,A : Tuple=32 ,A : Dict=5 ,A : Optional[int]=4 ,A : Dict=37 ,A : Any="gelu" ,A : Any=0.1 ,A : Optional[int]=0.1 ,A : Union[str, Any]=512 ,A : Any=16 ,A : List[str]=2 ,A : List[Any]=0.0_2 ,A : Optional[int]=4 ,): '''simple docstring''' UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : Any = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : Optional[int] = is_training UpperCAmelCase__ : Optional[Any] = use_attention_mask UpperCAmelCase__ : int = use_token_type_ids UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : Any = vocab_size UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Dict = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : Any = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : List[Any] = type_vocab_size UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = num_choices def __lowercase ( self : Optional[Any] ): '''simple docstring''' 
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase__ : List[str] = None if self.use_attention_mask: UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : int = DistilBertConfig( vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=A ,) return config, input_ids, attention_mask def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs UpperCAmelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def __lowercase ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = FlaxDistilBertModelTester(self ) @slow def __lowercase ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase__ : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(A ) @require_flax class __lowercase ( unittest.TestCase ): @slow def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 
FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase__ : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) UpperCAmelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCAmelCase__ : Dict = model(A ,attention_mask=A )[0] UpperCAmelCase__ : List[Any] = (1, 11, 768) self.assertEqual(output.shape ,A ) UpperCAmelCase__ : Any = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,A ,atol=1e-4 ) )
65
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json' # See all FNet models at https://huggingface.co/models?filter=fnet } class __lowercase ( __lowerCamelCase ): snake_case_ = """fnet""" def __init__( self : Optional[Any] ,A : Tuple=32_000 ,A : str=768 ,A : List[str]=12 ,A : List[Any]=3_072 ,A : Optional[Any]="gelu_new" ,A : Tuple=0.1 ,A : List[str]=512 ,A : Any=4 ,A : Optional[int]=0.0_2 ,A : Optional[int]=1e-12 ,A : List[Any]=False ,A : str=512 ,A : List[Any]=3 ,A : int=1 ,A : List[str]=2 ,**A : Optional[Any] ,): '''simple docstring''' super().__init__(pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,**A ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : Optional[Any] = max_position_embeddings UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = intermediate_size UpperCAmelCase__ : Union[str, Any] = hidden_act UpperCAmelCase__ : Dict = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Any = layer_norm_eps UpperCAmelCase__ : Optional[int] = use_tpu_fourier_optimizations UpperCAmelCase__ : Optional[Any] = tpu_short_seq_length
65
"""simple docstring""" __UpperCAmelCase = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image']) __UpperCAmelCase = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) __UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) __UpperCAmelCase = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['image', 'mask_image']) __UpperCAmelCase = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) __UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['class_labels']) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset(['batch_size']) __UpperCAmelCase = frozenset([]) __UpperCAmelCase = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) __UpperCAmelCase = frozenset(['prompt', 'negative_prompt']) __UpperCAmelCase = frozenset(['input_tokens']) __UpperCAmelCase = frozenset(['input_tokens'])
65
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __UpperCAmelCase = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
65
"""Test suite for a CTC processor that couples a tokenizer, a feature
extractor and a pyctcdecode beam-search decoder backed by an n-gram LM.

NOTE(review): this file appears to have been machine-mangled.  Every local
variable is assigned to the literal name ``UpperCAmelCase__`` and most
later reads go through names (``A``, ``processor``, ``tokenizer``,
``kwargs``, ...) that are never bound in the visible scope, so the tests
cannot run as written.  The comments below document the evident intent of
each test; all executable tokens are preserved verbatim.
"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wavaveca.test_feature_extraction_wavaveca import floats_list


# Decoder-related imports are optional: only pulled in when pyctcdecode
# (and the hub client) is installed, mirroring the library's soft deps.
if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
    from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput

if is_torch_available():
    from transformers import WavaVecaForCTC


@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
    def __lowercase ( self : Tuple ):
        """Set-up fixture: write a tiny tokenizer vocab and a feature-extractor
        config into a fresh temp dir, and record the hub id of the test
        n-gram beam-search decoder.

        NOTE(review): the local names are mangled (see module docstring) —
        ``self.tmpdirname`` / ``self.vocab_file`` etc. are read later but the
        assignments here all target ``UpperCAmelCase__``.
        """
        # Minimal CTC vocabulary: word delimiter, specials, a few letters.
        UpperCAmelCase__ : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
        UpperCAmelCase__ : Tuple = dict(zip(A ,range(len(A ) ) ) )
        UpperCAmelCase__ : Optional[Any] = {
            """unk_token""": """<unk>""",
            """bos_token""": """<s>""",
            """eos_token""": """</s>""",
        }
        # Feature-extractor config matching 16 kHz mono audio.
        UpperCAmelCase__ : int = {
            """feature_size""": 1,
            """padding_value""": 0.0,
            """sampling_rate""": 16_000,
            """return_attention_mask""": False,
            """do_normalize""": True,
        }
        UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
        UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
        UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,A )
        # Persist vocab and feature-extractor config as JSON fixtures.
        with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(A ) + """\n""" )
        with open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(A ) + """\n""" )

        # load decoder from hub
        UpperCAmelCase__ : int = """hf-internal-testing/ngram-beam-search-decoder"""

    def __lowercase ( self : str ,**A : List[Any] ):
        """Return a CTC tokenizer loaded from the temp-dir fixture, with the
        special-token kwargs merged over any caller overrides."""
        UpperCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy()
        kwargs.update(A )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**A )

    def __lowercase ( self : List[str] ,**A : Dict ):
        """Return a feature extractor loaded from the temp-dir fixture."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**A )

    def __lowercase ( self : Any ,**A : List[Any] ):
        """Return the hub-hosted n-gram beam-search decoder fixture."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**A )

    def __lowercase ( self : Any ):
        """Tear-down: delete the temp dir created in set-up."""
        shutil.rmtree(self.tmpdirname )

    def __lowercase ( self : str ):
        """Round-trip test: save a processor, reload it, and check that the
        tokenizer vocab, feature-extractor config, decoder alphabet and LM
        unigram set all survive unchanged."""
        UpperCAmelCase__ : Tuple = self.get_tokenizer()
        UpperCAmelCase__ : Dict = self.get_feature_extractor()
        UpperCAmelCase__ : str = self.get_decoder()
        UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,A )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor ,A )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
        self.assertIsInstance(processor.decoder ,A )

    def __lowercase ( self : int ):
        """Round-trip test with overridden LM hyper-parameters: alpha, beta,
        score_boundary and unk_score_offset passed to ``from_pretrained``
        must land on the reloaded language model."""
        UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )

        # make sure that error is thrown when decoder alphabet doesn't match
        UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )

        # decoder
        self.assertEqual(processor.language_model.alpha ,5.0 )
        self.assertEqual(processor.language_model.beta ,3.0 )
        self.assertEqual(processor.language_model.score_boundary ,-7.0 )
        self.assertEqual(processor.language_model.unk_score_offset ,3 )

    def __lowercase ( self : Optional[Any] ):
        """Constructing a processor whose tokenizer vocab contains tokens the
        decoder alphabet does not cover must raise (message matched on the
        substring 'include')."""
        UpperCAmelCase__ : int = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["""xx"""] )
        with self.assertRaisesRegex(A ,"""include""" ):
            WavaVecaProcessorWithLM(
                tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )

    def __lowercase ( self : Tuple ):
        """Calling the processor on raw audio must produce (numerically) the
        same features as calling the feature extractor directly."""
        UpperCAmelCase__ : List[Any] = self.get_feature_extractor()
        UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
        UpperCAmelCase__ : Any = self.get_decoder()
        UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
        UpperCAmelCase__ : str = floats_list((3, 1_000) )
        UpperCAmelCase__ : Optional[Any] = feature_extractor(A ,return_tensors="""np""" )
        UpperCAmelCase__ : List[Any] = processor(A ,return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )

    def __lowercase ( self : int ):
        """Calling the processor with ``text=`` must match the tokenizer's
        direct encoding output key-for-key."""
        UpperCAmelCase__ : int = self.get_feature_extractor()
        UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
        UpperCAmelCase__ : Optional[int] = self.get_decoder()
        UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
        UpperCAmelCase__ : List[Any] = """This is a test string"""
        UpperCAmelCase__ : int = processor(text=A )
        UpperCAmelCase__ : Dict = tokenizer(A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )

    def __lowercase ( self : Tuple ,A : List[Any]=(2, 10, 16) ,A : Dict=77 ):
        """Helper: deterministic dummy logits of the given shape, seeded so
        decode results are reproducible across tests.

        NOTE(review): both parameters are mangled to ``A``; defaults suggest
        (batch=2, frames=10, vocab=16) and seed=77.
        """
        np.random.seed(A )
        return np.random.rand(*A )

    def __lowercase ( self : Union[str, Any] ):
        """Single-sample decode: processor.decode must agree with the top
        beam from the raw decoder (text, logit score, LM score)."""
        UpperCAmelCase__ : Dict = self.get_feature_extractor()
        UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
        UpperCAmelCase__ : int = self.get_decoder()
        UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
        UpperCAmelCase__ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
        UpperCAmelCase__ : Tuple = processor.decode(A )
        UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(A )[0]
        self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
        # Golden transcription for the seeded dummy logits.
        self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )

    @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
    def __lowercase ( self : List[str] ,A : List[Any] ):
        """Batch decode with no pool / fork pool / spawn pool must all match
        the raw decoder's batched beam search (texts and both scores)."""
        UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
        UpperCAmelCase__ : int = self.get_tokenizer()
        UpperCAmelCase__ : List[Any] = self.get_decoder()
        UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
        UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            UpperCAmelCase__ : List[str] = processor.batch_decode(A )
        else:
            with get_context(A ).Pool() as pool:
                UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(A ,A )
        UpperCAmelCase__ : Optional[Any] = list(A )
        # Reference decode straight through pyctcdecode, using a fork pool.
        with get_context("""fork""" ).Pool() as p:
            UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(A ,A )
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = [], [], []
        for beams in decoded_beams:
            # Top beam per sample: (text, ..., logit_score, lm_score).
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(A ,decoded_processor.text )
        self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text )
        self.assertListEqual(A ,decoded_processor.logit_score )
        self.assertListEqual(A ,decoded_processor.lm_score )

    def __lowercase ( self : int ):
        """Batch decode with explicit beam-search knobs (beam_width,
        beam_prune_logp, token_min_logp) must agree with pyctcdecode called
        with the same knobs, for texts and both score arrays."""
        UpperCAmelCase__ : Any = self.get_feature_extractor()
        UpperCAmelCase__ : Tuple = self.get_tokenizer()
        UpperCAmelCase__ : List[Any] = self.get_decoder()
        UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
        UpperCAmelCase__ : Dict = self._get_dummy_logits()
        UpperCAmelCase__ : Any = 15
        UpperCAmelCase__ : Dict = -2_0.0
        UpperCAmelCase__ : List[Any] = -4.0
        UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
            A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
        UpperCAmelCase__ : List[str] = decoded_processor_out.text
        UpperCAmelCase__ : List[str] = list(A )
        with get_context("""fork""" ).Pool() as pool:
            UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
                A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
        # Top beam tuple layout: index 0 = text, 2 = logit score, 3 = LM score.
        UpperCAmelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out]
        UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out]
        UpperCAmelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(A ,A )
        self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,A )
        self.assertTrue(np.array_equal(A ,decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,A ,atol=1e-3 ) )
        self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,A ,atol=1e-3 ) )

    def __lowercase ( self : List[Any] ):
        """Batch decode with LM hyper-parameters (alpha, beta,
        unk_score_offset, lm_score_boundary) must agree with pyctcdecode
        after ``reset_params`` with the same values, and the values must be
        persisted on the processor's language model."""
        UpperCAmelCase__ : Tuple = self.get_feature_extractor()
        UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
        UpperCAmelCase__ : int = self.get_decoder()
        UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
        UpperCAmelCase__ : Tuple = self._get_dummy_logits()
        UpperCAmelCase__ : Tuple = 2.0
        UpperCAmelCase__ : str = 5.0
        UpperCAmelCase__ : Union[str, Any] = -2_0.0
        UpperCAmelCase__ : Optional[Any] = True
        UpperCAmelCase__ : str = processor.batch_decode(
            A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
        UpperCAmelCase__ : Any = decoded_processor_out.text
        UpperCAmelCase__ : Union[str, Any] = list(A )
        # Apply the same LM parameters to the reference decoder.
        decoder.reset_params(
            alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
        with get_context("""fork""" ).Pool() as pool:
            UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch(
                A ,A ,)
        UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(A ,A )
        self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,A )
        UpperCAmelCase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha ,2.0 )
        self.assertEqual(lm_model.beta ,5.0 )
        self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
        self.assertEqual(lm_model.score_boundary ,A )

    def __lowercase ( self : Optional[Any] ):
        """``from_pretrained`` on a hub repo must download only the decoder
        files (alphabet + language model dir), nothing else."""
        UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        UpperCAmelCase__ : str = processor.decoder.model_container[processor.decoder._model_key]
        # Resolve the on-disk decoder directory from the kenlm model path.
        UpperCAmelCase__ : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
        UpperCAmelCase__ : Optional[int] = os.listdir(A )
        UpperCAmelCase__ : List[Any] = ["""alphabet.json""", """language_model"""]
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(A ,A )

    def __lowercase ( self : int ):
        """Loading from a full local snapshot must leave the cached decoder
        files identical to what ``from_pretrained`` resolves."""
        UpperCAmelCase__ : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
        UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(A )
        UpperCAmelCase__ : Tuple = processor.decoder.model_container[processor.decoder._model_key]
        UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
        UpperCAmelCase__ : Tuple = os.listdir(A )
        UpperCAmelCase__ : Dict = os.listdir(A )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(A ,A )

    def __lowercase ( self : List[Any] ):
        """AutoProcessor must resolve to the same processor class: identical
        feature sums on raw audio and identical batch-decode texts."""
        UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        UpperCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        UpperCAmelCase__ : Dict = floats_list((3, 1_000) )
        UpperCAmelCase__ : List[str] = processor_wavaveca(A ,return_tensors="""np""" )
        UpperCAmelCase__ : Dict = processor_auto(A ,return_tensors="""np""" )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
        UpperCAmelCase__ : List[str] = self._get_dummy_logits()
        UpperCAmelCase__ : Tuple = processor_wavaveca.batch_decode(A )
        UpperCAmelCase__ : List[str] = processor_auto.batch_decode(A )
        self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )

    def __lowercase ( self : List[str] ):
        """The processor must expose the feature extractor's
        ``model_input_names`` unchanged."""
        UpperCAmelCase__ : Dict = self.get_feature_extractor()
        UpperCAmelCase__ : Tuple = self.get_tokenizer()
        UpperCAmelCase__ : List[Any] = self.get_decoder()
        UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
        self.assertListEqual(
            processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)

    @staticmethod
    def __lowercase ( A : Optional[Any] ,A : Optional[Any] ):
        """Helper: project one field out of a list of offset dicts.

        NOTE(review): both parameters are mangled to ``A``; the body reads
        ``offsets`` (list of dicts) and ``key`` (field name).
        """
        UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets]
        return retrieved_list

    def __lowercase ( self : Dict ):
        """Single-sample decode with ``output_word_offsets=True``: output has
        the expected keys, the joined words reproduce the text, and the
        word/start/end offsets match golden values."""
        UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        UpperCAmelCase__ : Dict = self._get_dummy_logits()[0]
        UpperCAmelCase__ : List[str] = processor.decode(A ,output_word_offsets=A )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue("""text""" in outputs )
        self.assertTrue("""word_offsets""" in outputs )
        self.assertTrue(isinstance(A ,A ) )
        self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] )

    def __lowercase ( self : Dict ):
        """Batched variant of the word-offsets test: per-sample joined words
        must reproduce each text, and sample 0's offsets match golden values."""
        UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        UpperCAmelCase__ : int = self._get_dummy_logits()
        UpperCAmelCase__ : Any = processor.batch_decode(A ,output_word_offsets=A )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) ,4 )
        self.assertTrue("""text""" in outputs )
        self.assertTrue("""word_offsets""" in outputs )
        self.assertTrue(isinstance(A ,A ) )
        self.assertListEqual(
            [""" """.join(self.get_from_offsets(A ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] )

    @slow
    @require_torch
    @require_torchaudio
    def __lowercase ( self : Tuple ):
        """Integration test: run a real Common Voice sample through a real
        wav2vec2-with-LM checkpoint and check the decoded words and their
        start/end timestamps against golden values (0.01 s tolerance)."""
        import torch

        UpperCAmelCase__ : Any = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=A )
        UpperCAmelCase__ : Tuple = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16_000 ) )
        UpperCAmelCase__ : Tuple = iter(A )
        UpperCAmelCase__ : Optional[int] = next(A )
        UpperCAmelCase__ : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
        UpperCAmelCase__ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        UpperCAmelCase__ : Tuple = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values
        with torch.no_grad():
            UpperCAmelCase__ : Union[str, Any] = model(A ).logits.cpu().numpy()
        UpperCAmelCase__ : Any = processor.decode(logits[0] ,output_word_offsets=A )
        # Frame-index -> seconds conversion factor for this model.
        UpperCAmelCase__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        UpperCAmelCase__ : Union[str, Any] = [
            {
                """start_time""": d["""start_offset"""] * time_offset,
                """end_time""": d["""end_offset"""] * time_offset,
                """word""": d["""word"""],
            }
            for d in output["""word_offsets"""]
        ]
        UpperCAmelCase__ : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
        # output words
        self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,A )
        self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,output.text )
        # output times
        UpperCAmelCase__ : str = torch.tensor(self.get_from_offsets(A ,"""start_time""" ) )
        UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(A ,"""end_time""" ) )
        # fmt: off
        UpperCAmelCase__ : Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        UpperCAmelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
        self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
65
1