code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase( snake_case_ , unittest.TestCase ): """simple docstring""" a : Optional[Any] = DiTPipeline a : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS a : Dict = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } a : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS a : Optional[Any] = False def __a ( self ) -> Any: """simple docstring""" torch.manual_seed(0 ) lowercase__ : Optional[Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=lowerCamelCase , ) lowercase__ : str = AutoencoderKL() lowercase__ : Optional[int] = DDIMScheduler() lowercase__ : List[str] = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def __a ( self , lowerCamelCase , lowerCamelCase=0 ) -> Optional[Any]: """simple docstring""" if str(lowerCamelCase ).startswith("mps" ): lowercase__ : List[Any] = torch.manual_seed(lowerCamelCase ) else: lowercase__ : int = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) lowercase__ : List[str] = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return 
inputs def __a ( self ) -> Any: """simple docstring""" lowercase__ : Union[str, Any] = "cpu" lowercase__ : Optional[int] = self.get_dummy_components() lowercase__ : Tuple = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) lowercase__ : Optional[Any] = self.get_dummy_inputs(lowerCamelCase ) lowercase__ : str = pipe(**lowerCamelCase ).images lowercase__ : Optional[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowercase__ : Dict = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] ) lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase , 1E-3 ) def __a ( self ) -> str: """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self ) -> int: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCAmelCase( unittest.TestCase ): """simple docstring""" def __a ( self ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ) -> List[str]: """simple docstring""" lowercase__ : Dict = torch.manual_seed(0 ) lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) lowercase__ : Optional[Any] = ["vase", "umbrella", "white shark", "white wolf"] lowercase__ : str = pipe.get_label_ids(lowerCamelCase ) lowercase__ : Dict = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="np" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): lowercase__ : List[str] = load_numpy( 
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-2 def __a ( self ) -> Optional[Any]: """simple docstring""" lowercase__ : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) lowercase__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) lowercase__ : Tuple = ["vase", "umbrella"] lowercase__ : str = pipe.get_label_ids(lowerCamelCase ) lowercase__ : Dict = torch.manual_seed(0 ) lowercase__ : int = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="np" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): lowercase__ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
397
import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch __a : List[str] = random.Random() def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=1.0 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ) -> Optional[int]: if rng is None: lowercase__ : Optional[Any] = global_rng lowercase__ : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class UpperCAmelCase( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=80 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase="hann_window" , lowerCamelCase=80 , lowerCamelCase=7600 , lowerCamelCase=1E-10 , lowerCamelCase=True , ) -> int: """simple docstring""" lowercase__ : Optional[int] = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Dict = min_seq_length lowercase__ : Optional[int] = max_seq_length lowercase__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase__ : List[Any] = feature_size lowercase__ : Union[str, Any] = padding_value lowercase__ : Dict = sampling_rate lowercase__ : int = do_normalize lowercase__ : Union[str, Any] = num_mel_bins lowercase__ : Optional[Any] = hop_length lowercase__ : Tuple = win_length lowercase__ : Any = win_function lowercase__ : Optional[Any] = fmin lowercase__ : str = fmax lowercase__ : Union[str, Any] = mel_floor lowercase__ : str = return_attention_mask def __a ( self ) -> Any: """simple 
docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def __a ( self , lowerCamelCase=False , lowerCamelCase=False ) -> List[str]: """simple docstring""" def _flatten(lowerCamelCase ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: lowercase__ : Optional[int] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size lowercase__ : List[str] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowercase__ : Dict = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs def __a ( self , lowerCamelCase=False , lowerCamelCase=False ) -> Optional[int]: """simple docstring""" if equal_length: lowercase__ : Union[str, Any] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowercase__ : Tuple = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowercase__ : List[str] = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch class UpperCAmelCase( snake_case_ , unittest.TestCase ): """simple docstring""" a : List[Any] = SpeechTaFeatureExtractor def __a ( self ) -> Tuple: """simple docstring""" lowercase__ : Union[str, Any] = SpeechTaFeatureExtractionTester(self ) def __a ( self , lowerCamelCase ) -> List[Any]: """simple docstring""" self.assertTrue(np.all(np.mean(lowerCamelCase , axis=0 ) < 1E-3 ) ) 
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase , axis=0 ) - 1 ) < 1E-3 ) ) def __a ( self ) -> List[str]: """simple docstring""" lowercase__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ : str = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input lowercase__ : int = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values lowercase__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched lowercase__ : Optional[int] = feat_extract(lowerCamelCase , return_tensors="np" ).input_values lowercase__ : Union[str, Any] = feat_extract(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def __a ( self ) -> Any: """simple docstring""" lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ : Any = ["longest", "max_length", "do_not_pad"] lowercase__ : List[Any] = [None, 1600, None] for max_length, padding in zip(lowerCamelCase , lowerCamelCase ): lowercase__ : Optional[int] = feat_extract(lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_tensors="np" ) lowercase__ : List[str] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __a ( 
self ) -> Any: """simple docstring""" lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ : Dict = range(800 , 1400 , 200 ) lowercase__ : List[str] = [floats_list((1, x) )[0] for x in lengths] lowercase__ : Tuple = ["longest", "max_length", "do_not_pad"] lowercase__ : str = [None, 1600, None] for max_length, padding in zip(lowerCamelCase , lowerCamelCase ): lowercase__ : List[str] = feat_extract(lowerCamelCase , max_length=lowerCamelCase , padding=lowerCamelCase ) lowercase__ : Any = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def __a ( self ) -> Optional[Any]: """simple docstring""" lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ : Tuple = feat_extract( lowerCamelCase , truncation=lowerCamelCase , max_length=1000 , padding="max_length" , return_tensors="np" ) lowercase__ : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def __a ( self ) -> Any: """simple docstring""" lowercase__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ : Tuple = feat_extract( lowerCamelCase , truncation=lowerCamelCase , max_length=1000 , padding="longest" , return_tensors="np" ) lowercase__ : Dict = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if 
max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) lowercase__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ : Union[str, Any] = feat_extract( lowerCamelCase , truncation=lowerCamelCase , max_length=2000 , padding="longest" , return_tensors="np" ) lowercase__ : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) def __a ( self ) -> Any: """simple docstring""" lowercase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ : Tuple = np.random.rand(100 ).astype(np.floataa ) lowercase__ : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowercase__ : Tuple = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) lowercase__ : Dict = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __a ( self ) -> str: """simple docstring""" lowercase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ : List[str] = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test feature size lowercase__ : str = feature_extractor(audio_target=lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input lowercase__ : 
Union[str, Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values lowercase__ : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test batched lowercase__ : Dict = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values lowercase__ : List[str] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. lowercase__ : str = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase__ : Optional[Any] = np.asarray(lowerCamelCase ) lowercase__ : List[Any] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values lowercase__ : List[str] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ): self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def __a ( self ) -> str: """simple docstring""" lowercase__ : Dict = self.feat_extract_tester.prepare_inputs_for_target() lowercase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) lowercase__ : Dict = feat_extract.model_input_names[0] lowercase__ : int = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(lowerCamelCase ) == len(lowerCamelCase ) for x, y in zip(lowerCamelCase , processed_features[input_name] ) ) ) lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase ) lowercase__ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" ) lowercase__ : Optional[int] = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowercase__ : Tuple = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == 
(self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __a ( self ) -> Tuple: """simple docstring""" lowercase__ : Dict = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase ) lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) lowercase__ : Optional[Any] = feat_extract.model_input_names[0] lowercase__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="pt" ) lowercase__ : List[str] = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowercase__ : int = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __a ( self ) -> Tuple: """simple docstring""" lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) lowercase__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target() lowercase__ : Optional[Any] = feat_extract.model_input_names[0] lowercase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} ) lowercase__ : Optional[int] = feat_extract.num_mel_bins # hack! 
lowercase__ : Optional[int] = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name] lowercase__ : Optional[int] = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def __a ( self ) -> Tuple: """simple docstring""" lowercase__ : Tuple = self.feat_extract_dict lowercase__ : int = True lowercase__ : Optional[Any] = self.feature_extraction_class(**lowerCamelCase ) lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target() lowercase__ : Union[str, Any] = [len(lowerCamelCase ) for x in speech_inputs] lowercase__ : Any = feat_extract.model_input_names[0] lowercase__ : Optional[int] = BatchFeature({input_name: speech_inputs} ) lowercase__ : int = feat_extract.num_mel_bins # hack! lowercase__ : int = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" ) self.assertIn("attention_mask" , lowerCamelCase ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase ) def __a ( self ) -> Dict: """simple docstring""" lowercase__ : List[Any] = self.feat_extract_dict lowercase__ : Optional[int] = True lowercase__ : List[Any] = self.feature_extraction_class(**lowerCamelCase ) lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target() lowercase__ : List[str] = [len(lowerCamelCase ) for x in speech_inputs] lowercase__ : Any = feat_extract.model_input_names[0] lowercase__ : Dict = BatchFeature({input_name: speech_inputs} ) lowercase__ : int = min(lowerCamelCase ) lowercase__ : List[str] = feat_extract.num_mel_bins # hack! 
lowercase__ : Dict = feat_extract.pad( lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="np" ) self.assertIn("attention_mask" , lowerCamelCase ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def __a ( self , lowerCamelCase ) -> List[Any]: """simple docstring""" from datasets import load_dataset lowercase__ : Any = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech lowercase__ : int = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def __a ( self ) -> List[str]: """simple docstring""" lowercase__ : List[str] = torch.tensor( [2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03, 3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03, 2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04, 4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03, 7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04, 4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] ) # fmt: on lowercase__ : List[Any] = self._load_datasamples(1 ) lowercase__ : int = SpeechTaFeatureExtractor() lowercase__ : Tuple = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 93680) ) self.assertTrue(torch.allclose(input_values[0, :30] , lowerCamelCase , atol=1E-6 ) ) def __a ( self ) -> int: """simple docstring""" lowercase__ : Optional[int] = torch.tensor( [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77, -3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86, -3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71, 
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] ) # fmt: on lowercase__ : Any = self._load_datasamples(1 ) lowercase__ : List[Any] = SpeechTaFeatureExtractor() lowercase__ : int = feature_extractor(audio_target=lowerCamelCase , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 366, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
397
1
"""Tests for the DeepFloyd IF text-to-image pipelines (fast dummy tests + slow GPU tests)."""
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        """Delegate dummy-model construction to the shared IF tester mixin."""
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build minimal pipeline inputs; MPS needs a global (not device-bound) generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        """Run text-to-image, img2img and inpainting IF stacks end-to-end on real checkpoints."""
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1 (64x64 base stage)
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2 (256x256 super-resolution stage)
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    """Reset CUDA allocator statistics so per-stage peak-memory asserts are meaningful."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
337
# Public API of the features subpackage: the export list must match the names
# actually imported below (Array2D..Array5D were previously garbled to a single
# duplicated "ArrayaD" import, and the list was bound to `a` instead of `__all__`).
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]

from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
337
1
"""Deprecated re-export shim: `TFGenerationMixin` moved to `transformers.generation`."""
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time: subclassing the real mixin keeps behavior identical
    # while flagging the legacy import path. The garbled source used the undefined
    # name `lowerCamelCase` both as the base class and as the warning category.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
0
"""Token-classification task definitions (NER, chunking, POS) for the example scripts."""
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask

logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    """CoNLL-style NER task: one token per line, label in column `label_idx`."""

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Parse `{mode}.txt` into InputExamples, splitting on blank lines / -DOCSTART-."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Echo the test file with one prediction appended per token line."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read labels from `path` (one per line, 'O' prepended if missing) or fall back to CoNLL-2003 defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    """CoNLL-2003 chunking task; reuses NER parsing with a different label column."""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    """Universal-POS tagging task over CoNLL-U files parsed with `conllu.parse_incr`."""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
557
0
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def is_in_circle(lowerCAmelCase_ , lowerCAmelCase_ ) -> bool: __SCREAMING_SNAKE_CASE = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle __SCREAMING_SNAKE_CASE = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(lowerCAmelCase_ ) ) # The ratio of the area for circle to square is pi/4. __SCREAMING_SNAKE_CASE = proportion * 4 print(f"""The estimated value of pi is {pi_estimate}""" ) print(f"""The numpy value of pi is {pi}""" ) print(f"""The total error is {abs(pi - pi_estimate )}""" ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1.0 , ): '''simple docstring''' return mean( function_to_integrate(uniform(lowerCAmelCase_ , lowerCAmelCase_ ) ) for _ in range(lowerCAmelCase_ ) ) * (max_value - min_value) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1.0 ): '''simple docstring''' def identity_function(lowerCAmelCase_ ) -> float: return x __SCREAMING_SNAKE_CASE = area_under_curve_estimator( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {expected_value}""" ) print(f"""Total error is {abs(estimated_value - expected_value )}""" ) print("******************" ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def function_to_integrate(lowerCAmelCase_ ) -> float: return sqrt(4.0 - x * x ) 
__SCREAMING_SNAKE_CASE = area_under_curve_estimator( lowerCAmelCase_ , lowerCAmelCase_ , 0.0 , 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {pi}""" ) print(f"""Total error is {abs(estimated_value - pi )}""" ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
553
"""simple docstring""" from __future__ import annotations import math class UpperCamelCase_ : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int ) -> None: __SCREAMING_SNAKE_CASE = size # approximate the overall size of segment tree with given value __SCREAMING_SNAKE_CASE = [0 for i in range(0 , 4 * size )] # create array to store lazy update __SCREAMING_SNAKE_CASE = [0 for i in range(0 , 4 * size )] __SCREAMING_SNAKE_CASE = [0 for i in range(0 , 4 * size )] # flag for lazy update def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : int ) -> int: return idx * 2 def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int ) -> int: return idx * 2 + 1 def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[int] ) -> None: if left_element == right_element: __SCREAMING_SNAKE_CASE = a[left_element - 1] else: __SCREAMING_SNAKE_CASE = (left_element + right_element) // 2 self.build(self.left(UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) self.build(self.right(UpperCAmelCase__ ) , mid + 1 , UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = max( self.segment_tree[self.left(UpperCAmelCase__ )] , self.segment_tree[self.right(UpperCAmelCase__ )] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool: if self.flag[idx] is True: __SCREAMING_SNAKE_CASE = self.lazy[idx] __SCREAMING_SNAKE_CASE = False if left_element != right_element: __SCREAMING_SNAKE_CASE = self.lazy[idx] __SCREAMING_SNAKE_CASE = self.lazy[idx] __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: __SCREAMING_SNAKE_CASE = val if left_element != right_element: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = 
val __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True return True __SCREAMING_SNAKE_CASE = (left_element + right_element) // 2 self.update(self.left(UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) self.update(self.right(UpperCAmelCase__ ) , mid + 1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = max( self.segment_tree[self.left(UpperCAmelCase__ )] , self.segment_tree[self.right(UpperCAmelCase__ )] ) return True def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int | float: if self.flag[idx] is True: __SCREAMING_SNAKE_CASE = self.lazy[idx] __SCREAMING_SNAKE_CASE = False if left_element != right_element: __SCREAMING_SNAKE_CASE = self.lazy[idx] __SCREAMING_SNAKE_CASE = self.lazy[idx] __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] __SCREAMING_SNAKE_CASE = (left_element + right_element) // 2 __SCREAMING_SNAKE_CASE = self.query(self.left(UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.query(self.right(UpperCAmelCase__ ) , mid + 1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return max(UpperCAmelCase__ , UpperCAmelCase__ ) def __str__( self : int ) -> str: return str([self.query(1 , 1 , self.size , UpperCAmelCase__ , UpperCAmelCase__ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": a__ : Tuple = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] a__ : Dict = 1_5 a__ : int = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) 
print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
553
1
"""Generate large primes via trial division by small primes plus the Rabin–Miller test.

The garbled source named all three functions `UpperCamelCase_` while the bodies
called `rabin_miller` and `is_prime_low_num` — undefined names. Restored.
"""
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic primality test: 5 rounds of Rabin–Miller; assumes num is odd and > 3."""
    s = num - 1
    t = 0
    # factor num - 1 as s * 2**t with s odd
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Cheap primality check: small-prime table first, Rabin–Miller only as a last resort."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime with exactly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
427
"""Convert a fairseq/unilm WavLM checkpoint to the Hugging Face WavLM format.

The garbled source named all four functions `UpperCamelCase_` while the bodies
called `set_recursively`, `load_conv_layer`, `recursively_load_weights` and
`convert_wavlm_checkpoint` — undefined names. Canonical names restored.
"""
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq key fragment -> HF module path ("*" is replaced with the layer index)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` attribute-by-attribute on the HF model and assign `value` to the leaf tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every fairseq tensor into the HF model via MAPPING; report leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-extractor conv/layer-norm tensor, validated against the HF shape."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load the fairseq checkpoint, build an HF WavLMModel, copy weights, and save it."""
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
427
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class A__( unittest.TestCase ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Any=18 , __SCREAMING_SNAKE_CASE : Optional[int]=30 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4_00 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=[0.5, 0.5, 0.5] , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size if size is not None else {'''height''': 18, '''width''': 20} __SCREAMING_SNAKE_CASE = do_thumbnail __SCREAMING_SNAKE_CASE = do_align_axis __SCREAMING_SNAKE_CASE = do_pad __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std def _a ( self : str ) -> List[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": 
self.image_std, } @require_torch @require_vision class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = DonutImageProcessor if is_vision_available() else None def _a ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = DonutImageProcessingTester(self ) @property def _a ( self : str ) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_thumbnail''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_align_long_axis''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_pad''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) ) def _a ( self : Tuple ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) # Previous config had dimensions in (width, height) order __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} ) def _a ( self : Dict ) -> int: """simple docstring""" pass @is_flaky() def _a ( self : Optional[int] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # 
create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def _a ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def _a ( 
self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
690
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase__ ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" lowerCAmelCase__ ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. 
Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = 
datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" lowerCAmelCase__ ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__( datasets.Metric ): def _a ( self : Any ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]="binary" , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]="warn" , ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = recall_score( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , zero_division=__SCREAMING_SNAKE_CASE , ) return {"recall": float(__SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
690
1
"""Tests for the safe Stable Diffusion pipeline (StableDiffusionPipelineSafe)."""

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    """Fast CPU-friendly tests using tiny dummy models."""

    def tearDown(self):
        # Clean up replicable artifacts and free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        """A deterministic random image batch of shape (1, 3, 32, 32)."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        """A tiny conditional UNet for fast denoising steps."""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        """A tiny VAE matching the dummy UNet's latent channels."""
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """A tiny CLIP text encoder with a small vocabulary."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        """A stand-in feature extractor returning an empty pixel_values tensor."""

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        # Same seed again: the tuple-return path must produce the same image.
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """Test that the pipeline runs end-to-end with all models in fp16."""
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU tests comparing outputs with and without safe latent diffusion guidance."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        # This variant keeps the default safety checker enabled.
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance (sld_guidance_scale = 0): image is fully blacked out
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
284
"""Approximate minimum vertex cover of an undirected graph via maximal matching.

The classic 2-approximation: repeatedly pick an arbitrary remaining edge, add
both of its endpoints to the cover, then discard every edge incident to either
endpoint. The result is a valid vertex cover at most twice the optimal size.
"""


def matching_min_vertex_cover(graph: dict) -> set:
    """Return a vertex cover of *graph* whose size is at most twice the optimum.

    Args:
        graph: Adjacency mapping, ``node -> list of neighbour nodes``.

    Returns:
        A set of vertices such that every edge of the graph has at least one
        endpoint in the set.
    """
    chosen_vertices = set()
    # edges = set of the graph's directed edge tuples
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both extremities to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        # Iterate over a snapshot: we mutate `edges` inside the loop.
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of directed edge tuples ``(from_node, to_node)`` in *graph*."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
284
1
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    """Format ``t`` (seconds) as ``h:mm:ss`` (or ``mm:ss`` when under an hour)."""
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    """Return the HTML snippet for a progress bar of ``value``/``total``."""
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    """Render ``items`` (first row = header) as an HTML table."""
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    """An IPython/Jupyter progress bar that throttles its own refresh rate.

    The update cadence adapts to the measured time per item so the display
    is refreshed roughly every ``update_every`` seconds.
    """

    # number of initial calls during which every update is rendered
    warmup = 5
    # target wall-clock seconds between two display refreshes
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        """Record progress ``value``; re-render only when warranted."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize timing state and always draw.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                # refresh again after roughly `update_every` seconds of work
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild the textual label (timings, throughput, comment) and redraw."""
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Blank out the bar (only when this is a top-level bar)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    """A progress bar that also renders a metrics table and an optional child bar."""

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        # inner_table rows: [column names, row, row, ...]
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append one row (dict column -> value) to the metrics table."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Create (and return) a child bar displayed beneath the table."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    """`TrainerCallback` displaying training/eval progress in a notebook."""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True
        )
        self.training_tracker = None
243
# Lazy-import structure for the InstructBLIP model package.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# Modeling code is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
243
1
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters _lowercase : List[Any] = (7_20, 12_80) # Height, Width _lowercase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it. _lowercase : Dict = 1 / 1_00 _lowercase : Dict = '' _lowercase : List[Any] = '' _lowercase : Any = '' _lowercase : List[str] = 2_50 def lowercase__ ( ): __UpperCAmelCase , __UpperCAmelCase = get_dataset(lowerCAmelCase__ , lowerCAmelCase__ ) for index in range(lowerCAmelCase__ ): __UpperCAmelCase = random.sample(range(len(lowerCAmelCase__ ) ) , 4 ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , filter_scale=lowerCAmelCase__ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __UpperCAmelCase = random_chars(32 ) __UpperCAmelCase = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0] __UpperCAmelCase = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}''' cva.imwrite(F'''{file_root}.jpg''' , lowerCAmelCase__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' ) __UpperCAmelCase = [] for anno in new_annos: __UpperCAmelCase = anno[3] - anno[1] __UpperCAmelCase = anno[4] - anno[2] __UpperCAmelCase = anno[1] + width / 2 __UpperCAmelCase = anno[2] + height / 2 __UpperCAmelCase = F'''{anno[0]} {x_center} {y_center} {width} {height}''' annos_list.append(lowerCAmelCase__ ) with open(F'''{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Optional[Any] ): __UpperCAmelCase = [] __UpperCAmelCase = [] for label_file in glob.glob(os.path.join(lowerCAmelCase__ , '''*.txt''' ) ): __UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(lowerCAmelCase__ ) as in_file: __UpperCAmelCase = 
in_file.readlines() __UpperCAmelCase = os.path.join(lowerCAmelCase__ , F'''{label_name}.jpg''' ) __UpperCAmelCase = [] for obj_list in obj_lists: __UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) __UpperCAmelCase = float(obj[1] ) - float(obj[3] ) / 2 __UpperCAmelCase = float(obj[2] ) - float(obj[4] ) / 2 __UpperCAmelCase = float(obj[1] ) + float(obj[3] ) / 2 __UpperCAmelCase = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(lowerCAmelCase__ ) labels.append(lowerCAmelCase__ ) return img_paths, labels def lowercase__ ( snake_case_ :Tuple , snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :Optional[int] = 0.0 , ): __UpperCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) __UpperCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __UpperCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __UpperCAmelCase = int(scale_x * output_size[1] ) __UpperCAmelCase = int(scale_y * output_size[0] ) __UpperCAmelCase = [] __UpperCAmelCase = [] for i, index in enumerate(lowerCAmelCase__ ): __UpperCAmelCase = all_img_list[index] path_list.append(lowerCAmelCase__ ) __UpperCAmelCase = all_annos[index] __UpperCAmelCase = cva.imread(lowerCAmelCase__ ) if i == 0: # top-left __UpperCAmelCase = cva.resize(lowerCAmelCase__ , (divid_point_x, divid_point_y) ) __UpperCAmelCase = img for bbox in img_annos: __UpperCAmelCase = bbox[1] * scale_x __UpperCAmelCase = bbox[2] * scale_y __UpperCAmelCase = bbox[3] * scale_x __UpperCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right __UpperCAmelCase = cva.resize(lowerCAmelCase__ , (output_size[1] - divid_point_x, divid_point_y) ) __UpperCAmelCase = img for bbox in img_annos: __UpperCAmelCase = scale_x + bbox[1] * (1 - scale_x) __UpperCAmelCase = bbox[2] * scale_y 
__UpperCAmelCase = scale_x + bbox[3] * (1 - scale_x) __UpperCAmelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left __UpperCAmelCase = cva.resize(lowerCAmelCase__ , (divid_point_x, output_size[0] - divid_point_y) ) __UpperCAmelCase = img for bbox in img_annos: __UpperCAmelCase = bbox[1] * scale_x __UpperCAmelCase = scale_y + bbox[2] * (1 - scale_y) __UpperCAmelCase = bbox[3] * scale_x __UpperCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right __UpperCAmelCase = cva.resize( lowerCAmelCase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) __UpperCAmelCase = img for bbox in img_annos: __UpperCAmelCase = scale_x + bbox[1] * (1 - scale_x) __UpperCAmelCase = scale_y + bbox[2] * (1 - scale_y) __UpperCAmelCase = scale_x + bbox[3] * (1 - scale_x) __UpperCAmelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: __UpperCAmelCase = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def lowercase__ ( snake_case_ :Dict ): assert number_char > 1, "The number of character should greater than 1" __UpperCAmelCase = ascii_lowercase + digits return "".join(random.choice(lowerCAmelCase__ ) for _ in range(lowerCAmelCase__ ) ) if __name__ == "__main__": main() print('DONE ✅')
49
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    """Agent tool producing a binary segmentation mask from an image + text label,
    backed by a CLIPSeg checkpoint."""

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # PIL is required to build the output mask image.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        """Tokenize the label and preprocess the image into model tensors."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run CLIPSeg without tracking gradients; return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits at 0 and convert to a black/white PIL image."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
99
0
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


# Test fixtures: a tiny public repo, its local cache folder and a pinned commit.
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    """Tests for `cached_file`, `has_file` and `get_file_from_repo`."""

    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(new_archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        # The failed lookup is recorded under `.no_exist` so it is not retried.
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
585
"""Convert sharded fairseq NLLB-MoE checkpoints to the HF Transformers format."""

import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from *state_dict* in place."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight data."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    """Return a new dict with fairseq key names mapped to HF NllbMoe names."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert per-expert fairseq checkpoints into HF weight shards + index.

    Returns (metadata, index); index is None when everything fit in one shard.
    """
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        # Rename the placeholder shard now that the total count is known.
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
585
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""", """facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""", """facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""", """facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""", """facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""", """facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""", """facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""", """facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""", """facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""", } class a ( UpperCAmelCase__ ): UpperCamelCase : Tuple = 'xmod' def __init__( self : str , lowerCAmelCase : Dict=3_0522 , lowerCAmelCase : Optional[int]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=512 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Optional[int]=0.0_2 , lowerCAmelCase : List[str]=1E-12 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : int=0 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]="absolute" , lowerCAmelCase : Any=True , lowerCAmelCase : 
Tuple=None , lowerCAmelCase : List[str]=False , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Any=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Optional[Any]=("en_XX",) , lowerCAmelCase : str=None , **lowerCAmelCase : str , ) -> List[str]: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple =vocab_size SCREAMING_SNAKE_CASE_: Tuple =hidden_size SCREAMING_SNAKE_CASE_: Any =num_hidden_layers SCREAMING_SNAKE_CASE_: int =num_attention_heads SCREAMING_SNAKE_CASE_: str =hidden_act SCREAMING_SNAKE_CASE_: Any =intermediate_size SCREAMING_SNAKE_CASE_: int =hidden_dropout_prob SCREAMING_SNAKE_CASE_: Optional[int] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE_: Optional[Any] =max_position_embeddings SCREAMING_SNAKE_CASE_: Optional[int] =type_vocab_size SCREAMING_SNAKE_CASE_: List[str] =initializer_range SCREAMING_SNAKE_CASE_: Optional[Any] =layer_norm_eps SCREAMING_SNAKE_CASE_: int =position_embedding_type SCREAMING_SNAKE_CASE_: Optional[int] =use_cache SCREAMING_SNAKE_CASE_: List[Any] =classifier_dropout SCREAMING_SNAKE_CASE_: Union[str, Any] =pre_norm SCREAMING_SNAKE_CASE_: List[str] =adapter_reduction_factor SCREAMING_SNAKE_CASE_: Tuple =adapter_layer_norm SCREAMING_SNAKE_CASE_: str =adapter_reuse_layer_norm SCREAMING_SNAKE_CASE_: List[Any] =ln_before_adapter SCREAMING_SNAKE_CASE_: str =list(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Dict =default_language class a ( UpperCAmelCase__ ): @property def lowerCamelCase__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE_: List[str] ={0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE_: Any ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
409
"""simple docstring""" from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. _UpperCAmelCase = 1_0 def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ): for i in range(lowercase , lowercase ): if array[i] == target: return i return -1 def __magic_name__ ( lowercase , lowercase ): SCREAMING_SNAKE_CASE_: Optional[Any] =0 SCREAMING_SNAKE_CASE_: List[Any] =len(lowercase ) while left <= right: if right - left < precision: return lin_search(lowercase , lowercase , lowercase , lowercase ) SCREAMING_SNAKE_CASE_: Optional[Any] =(left + right) // 3 + 1 SCREAMING_SNAKE_CASE_: str =2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: SCREAMING_SNAKE_CASE_: Union[str, Any] =one_third - 1 elif array[two_third] < target: SCREAMING_SNAKE_CASE_: Optional[int] =two_third + 1 else: SCREAMING_SNAKE_CASE_: List[str] =one_third + 1 SCREAMING_SNAKE_CASE_: Optional[Any] =two_third - 1 else: return -1 def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ): if left < right: if right - left < precision: return lin_search(lowercase , lowercase , lowercase , lowercase ) SCREAMING_SNAKE_CASE_: Union[str, Any] =(left + right) // 3 + 1 SCREAMING_SNAKE_CASE_: List[str] =2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(lowercase , one_third - 1 , lowercase , lowercase ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , lowercase , lowercase , lowercase ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , lowercase , lowercase ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() _UpperCAmelCase = input("""Enter numbers separated by comma:\n""").strip() 
_UpperCAmelCase = [int(item.strip()) for item in user_input.split(""",""")] assert collection == sorted(collection), f"List must be ordered.\n{collection}." _UpperCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip()) _UpperCAmelCase = ite_ternary_search(collection, target) _UpperCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f"""Iterative search: {target} found at positions: {resulta}""") print(f"""Recursive search: {target} found at positions: {resulta}""") else: print("""Not found""")
409
1
"""Highest Response Ratio Next (HRRN) scheduling simulation."""
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Simulate HRRN scheduling and return each process's turn-around time.

    The returned list is ordered by arrival time.  Input lists are NOT
    modified (the original sorted ``arrival_time`` in place, mutating the
    caller's data).
    """
    current_time = 0
    # Number of processes finished so far.
    finished_process_count = 0
    # 1 once the corresponding process has run to completion, else 0.
    finished_process = [0] * no_of_process
    turn_around_time = [0] * no_of_process

    # Work on copies reordered by arrival time so the caller's lists stay
    # untouched.
    order = np.argsort(arrival_time)
    burst_time = [burst_time[i] for i in order]
    process_name = [process_name[i] for i in order]
    arrival_time = sorted(arrival_time)

    while no_of_process > finished_process_count:
        # Find the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # If the CPU is idle, jump forward to that process's arrival.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        loc = 0  # index of the process chosen to run next
        temp = 0
        response_ratio = 0
        # Pick the ready process with the highest response ratio
        # (wait + burst) / burst.
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Turn-around time = completion time - arrival time.
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        finished_process[loc] = 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Waiting time of each process: turn-around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
94
"""Greedy best-first pathfinding on a small obstacle grid."""
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    """A search node: grid position plus search bookkeeping.

    ``f_cost`` is the Manhattan distance to the goal, so ordering the open
    list by ``f_cost`` yields greedy best-first behaviour.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # (row, col), matching the grid layout
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """Greedy best-first search on ``grid`` between (row, col) positions."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        # Node takes (col, row, goal_col, goal_row, ...); start/goal are (row, col).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        """Run the search; returns the found path, or [start] when unreachable."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (lowest heuristic first);
            # the sort is stable, so ties keep insertion order.
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                # NOTE(review): Node defines no __eq__, so these membership
                # tests compare by identity; freshly built successors are
                # therefore always appended.  Preserved from the original.
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Keep whichever copy of the node has the cheaper path.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, obstacle-free neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # Fixed: goal column/row were passed swapped, which made
                    # the heuristic aim at the transposed goal position.
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back to the start and return the forward path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
94
1
"""simple docstring"""
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """Prim's algorithm on a known 9-node graph yields the expected MST edges.

    Each expected edge may be reported in either orientation, so both the
    edge and its reverse are accepted.
    """
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # Build an undirected adjacency list: each edge appears for both endpoints.
    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
135
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE__ ( snake_case_ ): """simple docstring""" A__ : Union[str, Any] = (DDIMParallelScheduler,) A__ : Optional[Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50)) def a__ ( self , **A ) -> Union[str, Any]: A: str = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """clip_sample""": True, } config.update(**A ) return config def a__ ( self , **A ) -> Tuple: A: Optional[int] = self.scheduler_classes[0] A: Optional[Any] = self.get_scheduler_config(**A ) A: int = scheduler_class(**A ) A , A: Union[str, Any] = 10, 0.0 A: List[str] = self.dummy_model() A: Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(A ) for t in scheduler.timesteps: A: List[str] = model(A , A ) A: Optional[int] = scheduler.step(A , A , A , A ).prev_sample return sample def a__ ( self ) -> Dict: for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=A ) def a__ ( self ) -> Dict: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=A ) A: List[Any] = self.scheduler_classes[0] A: List[Any] = self.get_scheduler_config(steps_offset=1 ) A: int = scheduler_class(**A ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def a__ ( self ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A , beta_end=A ) def a__ ( self ) -> int: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A ) def a__ ( self ) -> Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A ) def a__ ( self ) -> List[str]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=A ) def a__ ( self ) -> Optional[int]: for 
timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=A ) def a__ ( self ) -> Tuple: for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=A ) def a__ ( self ) -> Tuple: self.check_over_configs(thresholding=A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=A , prediction_type=A , sample_max_value=A , ) def a__ ( self ) -> Union[str, Any]: for t in [1, 10, 49]: self.check_over_forward(time_step=A ) def a__ ( self ) -> int: for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ): self.check_over_forward(time_step=A , num_inference_steps=A ) def a__ ( self ) -> Optional[Any]: for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=A , eta=A ) def a__ ( self ) -> Union[str, Any]: A: Tuple = self.scheduler_classes[0] A: List[Any] = self.get_scheduler_config() A: int = scheduler_class(**A ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5 def a__ ( self ) -> Dict: A: Optional[Any] = self.scheduler_classes[0] A: int = self.get_scheduler_config() A: int = scheduler_class(**A ) A , A: str = 10, 0.0 scheduler.set_timesteps(A ) A: Tuple = self.dummy_model() A: Optional[int] = self.dummy_sample_deter A: int = self.dummy_sample_deter + 0.1 A: List[str] = self.dummy_sample_deter - 0.1 A: Any = samplea.shape[0] A: int = torch.stack([samplea, samplea, samplea] , dim=0 ) A: Optional[Any] = torch.arange(A )[0:3, None].repeat(1 , A ) A: List[str] = 
model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) A: Dict = scheduler.batch_step_no_noise(A , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , A ) A: Tuple = torch.sum(torch.abs(A ) ) A: Tuple = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 1147.7904 ) < 1e-2 assert abs(result_mean.item() - 0.4982 ) < 1e-3 def a__ ( self ) -> Tuple: A: Dict = self.full_loop() A: str = torch.sum(torch.abs(A ) ) A: Union[str, Any] = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 172.0067 ) < 1e-2 assert abs(result_mean.item() - 0.223967 ) < 1e-3 def a__ ( self ) -> Optional[Any]: A: str = self.full_loop(prediction_type="""v_prediction""" ) A: Any = torch.sum(torch.abs(A ) ) A: Union[str, Any] = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 52.5302 ) < 1e-2 assert abs(result_mean.item() - 0.0684 ) < 1e-3 def a__ ( self ) -> List[Any]: # We specify different beta, so that the first alpha is 0.99 A: Dict = self.full_loop(set_alpha_to_one=A , beta_start=0.01 ) A: Tuple = torch.sum(torch.abs(A ) ) A: Union[str, Any] = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 149.8295 ) < 1e-2 assert abs(result_mean.item() - 0.1951 ) < 1e-3 def a__ ( self ) -> Tuple: # We specify different beta, so that the first alpha is 0.99 A: List[Any] = self.full_loop(set_alpha_to_one=A , beta_start=0.01 ) A: List[Any] = torch.sum(torch.abs(A ) ) A: Optional[int] = torch.mean(torch.abs(A ) ) assert abs(result_sum.item() - 149.0784 ) < 1e-2 assert abs(result_mean.item() - 0.1941 ) < 1e-3
135
1
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[int] = IFPipeline __UpperCAmelCase : List[str] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def _UpperCamelCase ( self ): return self._get_dummy_components() def _UpperCamelCase ( self , a_ , a_=0 ): if str(a_ ).startswith("mps" ): lowerCamelCase_ : int = torch.manual_seed(a_ ) else: lowerCamelCase_ : Any = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCamelCase_ : List[Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def _UpperCamelCase ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def _UpperCamelCase ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def _UpperCamelCase ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _UpperCamelCase ( self ): 
self._test_save_load_local() def _UpperCamelCase ( self ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _UpperCamelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self ): # if lowerCamelCase_ : Optional[Any] = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) lowerCamelCase_ : Optional[Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=a_ , tokenizer=a_ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) lowerCamelCase_ : List[Any] = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() lowerCamelCase_ : Optional[Any] = None lowerCamelCase_ : Tuple = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(a_ , a_ , a_ , a_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img lowerCamelCase_ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) lowerCamelCase_ : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(a_ , a_ , a_ , a_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting 
lowerCamelCase_ : int = IFInpaintingPipeline(**pipe_a.components ) lowerCamelCase_ : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(a_ , a_ , a_ , a_ ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ): # pipeline 1 _start_torch_memory_measurement() lowerCamelCase_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase_ : Union[str, Any] = pipe_a( prompt_embeds=a_ , negative_prompt_embeds=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , ) lowerCamelCase_ : List[str] = output.images[0] assert image.shape == (64, 64, 3) lowerCamelCase_ : Optional[int] = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 lowerCamelCase_ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(a_ , a_ ) # pipeline 2 _start_torch_memory_measurement() lowerCamelCase_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase_ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ ) lowerCamelCase_ : List[Any] = pipe_a( prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , ) lowerCamelCase_ : Dict = output.images[0] assert image.shape == (256, 256, 3) lowerCamelCase_ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCamelCase_ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(a_ , a_ ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ): # pipeline 1 _start_torch_memory_measurement() lowerCamelCase_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ ) 
lowerCamelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase_ : int = pipe_a( prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , ) lowerCamelCase_ : Union[str, Any] = output.images[0] assert image.shape == (64, 64, 3) lowerCamelCase_ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowerCamelCase_ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(a_ , a_ ) # pipeline 2 _start_torch_memory_measurement() lowerCamelCase_ : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase_ : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a_ ) lowerCamelCase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ ) lowerCamelCase_ : List[Any] = pipe_a( prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , ) lowerCamelCase_ : Any = output.images[0] assert image.shape == (256, 256, 3) lowerCamelCase_ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCamelCase_ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(a_ , a_ ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ): # pipeline 1 _start_torch_memory_measurement() lowerCamelCase_ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ ) lowerCamelCase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a_ ) lowerCamelCase_ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase_ : Any = pipe_a( prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , ) 
lowerCamelCase_ : List[Any] = output.images[0] assert image.shape == (64, 64, 3) lowerCamelCase_ : str = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowerCamelCase_ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(a_ , a_ ) # pipeline 2 _start_torch_memory_measurement() lowerCamelCase_ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ ) lowerCamelCase_ : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a_ ) lowerCamelCase_ : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a_ ) lowerCamelCase_ : Optional[int] = pipe_a( prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , ) lowerCamelCase_ : Tuple = output.images[0] assert image.shape == (256, 256, 3) lowerCamelCase_ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowerCamelCase_ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(a_ , a_ ) def __magic_name__ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
713
"""Processor pairing an Encodec feature extractor with a T5 tokenizer."""
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    """Wraps an ``EncodecFeatureExtractor`` and a T5 tokenizer into a single
    processor: text goes to the tokenizer, audio to the feature extractor.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # The "current" processor is what __call__ forwards to while inside
        # a target context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(
            task=task, language=language, no_timestamps=no_timestamps
        )

    def __call__(self, *args, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audio``.

        Returns the tokenizer output, the feature-extractor output, or the
        tokenizer output augmented with ``input_values``/``padding_mask``
        when both modalities are given.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as audio.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode either generated audio (strip padding) or token ids."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        """Strip padding from a batch of generated audio arrays.

        Without a padding mask the batch is simply split into a list.
        """
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # Match the sequence length of the padding mask to the generated audio
        # arrays by padding with the **non-padding** token (so that the
        # generated audio values are **not** treated as padded tokens).
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(
            padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value
        )

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
73
0
def combination_util(arr, n, r, index, data, i):
    """Recursively emit all size-``r`` combinations of ``arr``.

    arr   : input pool
    n     : number of usable elements in ``arr``
    r     : combination size
    index : next free slot in ``data``
    data  : scratch buffer of length ``r``
    i     : next candidate position in ``arr``
    """
    if index == r:
        # A full combination is ready: print it on one line.
        for j in range(r):
            print(data[j], end=' ')
        print(' ')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print every size-``r`` combination of the first ``n`` items of ``arr``."""
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
31
"""Deprecated feature-extractor alias for MobileViT."""
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias of :class:`MobileViTImageProcessor`.

    Kept only for backward compatibility; instantiating it emits a
    ``FutureWarning`` and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
31
1
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


# SageMaker model-parallelism integration test: launches a real training job on
# AWS and checks runtime/accuracy/loss against the thresholds in "results".
# Only runs when the TEST_SAGEMAKER environment variable evaluates to True.
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ] )
class _a ( unittest.TestCase ):
    # NOTE(review): an obfuscation pass renamed every method to `_A` (so later
    # definitions shadow earlier ones) and left several identifiers unresolved
    # (`__UpperCAmelCase` used as a value, `instance_count`, `name_extension`,
    # `job_name`, `self.create_estimator`, the `smp_options`/`mpi_options`
    # references, and the `estimator`/`result_metrics_df`/... locals assigned
    # to `a__`). Confirm against the original
    # `tests/sagemaker/test_multi_node_model_parallel.py` before running.

    def _A ( self ):
        """Setup: copy the GLUE training script into the test path and sanity-check the env fixture."""
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="utf-8" , check=__UpperCAmelCase , )
        assert hasattr(self , "env" )

    def _A ( self , __UpperCAmelCase ):
        """Build a HuggingFace SageMaker estimator configured for smdistributed model parallelism."""
        # MPI options: 8 processes per host (a p3dn.24xlarge has 8 GPUs).
        a__ : Union[str, Any] = {
            "enabled": True,
            "processes_per_host": 8,
        }
        # Model-parallel options: 4 pipeline partitions, interleaved schedule, DDP enabled.
        a__ : Tuple = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        # NOTE(review): `smp_options`/`mpi_options` refer to the two dicts above - names lost in obfuscation.
        a__ : Optional[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        a__ : Union[str, Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,
            source_dir=self.env.test_path ,
            role=self.env.role ,
            image_uri=self.env.image_uri ,
            base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' ,
            instance_count=__UpperCAmelCase ,
            instance_type=self.instance_type ,
            debugger_hook_config=__UpperCAmelCase ,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            } ,
            metric_definitions=self.env.metric_definitions ,
            distribution=__UpperCAmelCase ,
            py_version="py36" ,
        )

    def _A ( self , __UpperCAmelCase ):
        """Export the training job's CloudWatch metrics to a CSV under the test path."""
        # NOTE(review): `job_name` is undefined here - the original used the method argument.
        TrainingJobAnalytics(__UpperCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )

    @parameterized.expand([(1,)] )
    def _A ( self , __UpperCAmelCase ):
        """Run training on SageMaker and assert runtime/accuracy/loss KPIs, dumping results to JSON."""
        a__ : str = self.create_estimator(__UpperCAmelCase )

        # run training
        estimator.fit()

        # result dataframe
        a__ : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        a__ : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        a__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        a__ : str = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __UpperCAmelCase )
207
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Return a random password of ``length`` characters drawn from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Return an ``i``-character shuffled password guaranteed to contain every char of ``chars_incl``.

    The remaining length is split roughly a third each between letters,
    digits and punctuation (letters absorb the remainder).
    """
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return ``i`` characters chosen (with replacement) from ``chars_incl``."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return True iff ``password`` is at least ``min_length`` chars long and
    contains an uppercase letter, a lowercase letter, a digit and a special character.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    # Passwords should contain UPPERCASE, lowerase
    # numbers, and special characters
    return upper and lower and num and spec_char


def main():
    """Interactive driver: ask for a length and required characters, then print passwords."""
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
207
1
"""simple docstring""" import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _lowerCAmelCase :Optional[Any] = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model _lowerCAmelCase :Optional[Any] = { # fairseq: 'wmt19-ru-en': {'length_penalty': 1.1}, 'wmt19-en-ru': {'length_penalty': 1.15}, 'wmt19-en-de': {'length_penalty': 1.0}, 'wmt19-de-en': {'length_penalty': 1.1}, # allenai: 'wmt16-en-de-dist-12-1': {'length_penalty': 0.6}, 'wmt16-en-de-dist-6-1': {'length_penalty': 0.6}, 'wmt16-en-de-12-1': {'length_penalty': 0.8}, 'wmt19-de-en-6-6-base': {'length_penalty': 0.6}, 'wmt19-de-en-6-6-big': {'length_penalty': 0.6}, } # this remaps the different models to their organization names _lowerCAmelCase :Optional[Any] = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: _lowerCAmelCase :Optional[Any] = 'facebook' for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: _lowerCAmelCase :List[str] = 'allenai' def lowerCamelCase_ (UpperCamelCase__ : int ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not 
broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} _UpperCAmelCase : Tuple = dict((re.sub(r'''@@$''' , '''''' , A__ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , A__ ), v) for k, v in d.items() ) _UpperCAmelCase : str = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[F'{k}</w>'] _UpperCAmelCase : List[Any] = d[k] # restore return da def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): # prep assert os.path.exists(A__ ) os.makedirs(A__ , exist_ok=A__ ) print(F'Writing results to {pytorch_dump_folder_path}' ) # handle various types of models _UpperCAmelCase : Union[str, Any] = basename(A__ ) _UpperCAmelCase : Any = dirname(A__ ) _UpperCAmelCase : Union[str, Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel _UpperCAmelCase : Any = cls.hub_models() _UpperCAmelCase : Any = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''} _UpperCAmelCase : List[Any] = '''.''' # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(F'using checkpoint {checkpoint_file}' ) _UpperCAmelCase : List[str] = hub_utils.from_pretrained( A__ , A__ , A__ , archive_map=A__ , **A__ ) _UpperCAmelCase : Tuple = vars(chkpt['''args''']['''model'''] ) _UpperCAmelCase : str = args['''source_lang'''] _UpperCAmelCase : str = args['''target_lang'''] _UpperCAmelCase : List[Any] = dirname(A__ ) _UpperCAmelCase : str = basename(A__ ) # dicts _UpperCAmelCase : Any = os.path.join(A__ , F'dict.{src_lang}.txt' ) _UpperCAmelCase : Optional[Any] = os.path.join(A__ , F'dict.{tgt_lang}.txt' ) _UpperCAmelCase : Optional[int] = Dictionary.load(A__ ) _UpperCAmelCase : List[str] = rewrite_dict_keys(src_dict.indices ) _UpperCAmelCase : Optional[Any] = len(A__ ) _UpperCAmelCase : Any = os.path.join(A__ , '''vocab-src.json''' ) print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' ) with open(A__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(A__ , ensure_ascii=A__ , indent=A__ ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab _UpperCAmelCase : Any = True for k in src_vocab.keys(): if not k.islower(): _UpperCAmelCase : List[str] = False break _UpperCAmelCase : Any = Dictionary.load(A__ ) _UpperCAmelCase : Optional[int] = rewrite_dict_keys(tgt_dict.indices ) _UpperCAmelCase : List[Any] = len(A__ ) _UpperCAmelCase : Union[str, Any] = os.path.join(A__ , '''vocab-tgt.json''' ) print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' ) with open(A__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(A__ , ensure_ascii=A__ , indent=A__ ) ) # merges_file (bpecodes) _UpperCAmelCase : Union[str, Any] = os.path.join(A__ , VOCAB_FILES_NAMES['''merges_file'''] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" _UpperCAmelCase : Tuple = os.path.join(A__ , A__ ) if 
os.path.exists(A__ ): break with open(A__ , encoding='''utf-8''' ) as fin: _UpperCAmelCase : Union[str, Any] = fin.read() _UpperCAmelCase : int = re.sub(r''' \d+$''' , '''''' , A__ , 0 , re.M ) # remove frequency number print(F'Generating {merges_file}' ) with open(A__ , '''w''' , encoding='''utf-8''' ) as fout: fout.write(A__ ) # model config _UpperCAmelCase : Any = os.path.join(A__ , '''config.json''' ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}' assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}' _UpperCAmelCase : List[Any] = { '''architectures''': ['''FSMTForConditionalGeneration'''], '''model_type''': '''fsmt''', '''activation_dropout''': args['''activation_dropout'''], '''activation_function''': '''relu''', '''attention_dropout''': args['''attention_dropout'''], '''d_model''': args['''decoder_embed_dim'''], '''dropout''': args['''dropout'''], '''init_std''': 0.02, '''max_position_embeddings''': args['''max_source_positions'''], '''num_hidden_layers''': args['''encoder_layers'''], '''src_vocab_size''': src_vocab_size, '''tgt_vocab_size''': tgt_vocab_size, '''langs''': [src_lang, tgt_lang], '''encoder_attention_heads''': args['''encoder_attention_heads'''], '''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''], '''encoder_layerdrop''': args['''encoder_layerdrop'''], '''encoder_layers''': args['''encoder_layers'''], '''decoder_attention_heads''': args['''decoder_attention_heads'''], '''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''], '''decoder_layerdrop''': args['''decoder_layerdrop'''], '''decoder_layers''': args['''decoder_layers'''], '''bos_token_id''': 0, '''pad_token_id''': 1, '''eos_token_id''': 2, '''is_encoder_decoder''': True, '''scale_embedding''': not args['''no_scale_embedding'''], 
'''tie_word_embeddings''': args['''share_all_embeddings'''], } # good hparam defaults to start with _UpperCAmelCase : int = 5 _UpperCAmelCase : List[Any] = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: _UpperCAmelCase : List[Any] = best_score_hparams[model_dir]['''length_penalty'''] else: _UpperCAmelCase : Optional[int] = 1.0 print(F'Generating {fsmt_model_config_file}' ) with open(A__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(A__ , ensure_ascii=A__ , indent=A__ ) ) # tokenizer config _UpperCAmelCase : Any = os.path.join(A__ , A__ ) _UpperCAmelCase : str = { '''langs''': [src_lang, tgt_lang], '''model_max_length''': 1024, '''do_lower_case''': do_lower_case, } print(F'Generating {fsmt_tokenizer_config_file}' ) with open(A__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(A__ , ensure_ascii=A__ , indent=A__ ) ) # model _UpperCAmelCase : str = chkpt['''models'''][0] _UpperCAmelCase : Optional[int] = model.state_dict() # rename keys to start with 'model.' 
_UpperCAmelCase : str = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys _UpperCAmelCase : int = [ '''model.model''', '''model.encoder.version''', '''model.decoder.version''', '''model.encoder_embed_tokens.weight''', '''model.decoder_embed_tokens.weight''', '''model.encoder.embed_positions._float_tensor''', '''model.decoder.embed_positions._float_tensor''', ] for k in ignore_keys: model_state_dict.pop(A__ , A__ ) _UpperCAmelCase : str = FSMTConfig.from_pretrained(A__ ) _UpperCAmelCase : Optional[int] = FSMTForConditionalGeneration(A__ ) # check that it loads ok model_new.load_state_dict(A__ , strict=A__ ) # save _UpperCAmelCase : Optional[Any] = os.path.join(A__ , A__ ) print(F'Generating {pytorch_weights_dump_path}' ) torch.save(A__ , A__ ) print('''Conversion is done!''' ) print('''\nLast step is to upload the files to s3''' ) print(F'cd {data_root}' ) print(F'transformers-cli upload {model_dir}' ) if __name__ == "__main__": _lowerCAmelCase :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--fsmt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _lowerCAmelCase :Dict = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
506
"""Thin wrappers around the OpenWeatherMap REST API."""

import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


# NOTE(review): the parameter names double as the HTTP query-string keys via
# locals(), so they must match the OpenWeatherMap API (q / lat / lon / appid) -
# names reconstructed here after obfuscation; confirm against the API docs.
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Return the current weather for location ``q`` as a parsed JSON dict."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Return the forecast for location ``q`` as a parsed JSON dict."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Return the one-call weather summary for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
263
0
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points (cut vertices) of an undirected graph.

    ``l`` is an adjacency list mapping each vertex 0..n-1 to its neighbours.
    Uses a DFS computing low-link values; a non-root vertex is an articulation
    point when a child's low-link cannot reach above it, and the DFS root is
    one iff it has more than one DFS child.
    """
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Count direct DFS children of the root (used for the root-AP rule).
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                # Back edge: take the smaller vertex id as the low-link value.
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # The DFS root is an articulation point iff it has more than one child.
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
664
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens; None if too few tokens to be meaningful."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split ``code`` on non-alphanumeric characters and return the set of non-empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    """MinHash-LSH index that groups near-duplicate code files into clusters."""

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # cluster base key -> set of duplicate keys attached to it
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert ``(code_key, min_hash)`` and attach the key to an existing cluster if it is a near-duplicate."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            # Prefer attaching to a duplicate that already anchors a cluster;
            # otherwise start a new cluster anchored at the first close duplicate.
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return clusters as lists of {"base_index", "repo_name", "path"} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the duplicate clusters to ``filepath`` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    """Worker: compute ((index, repo_name, path), MinHash) for one dataset row, or None if too short."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (key, MinHash) pairs for a dataset, computing hashes in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Build a DuplicationIndex over the whole dataset and return its duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Exact Jaccard similarity between the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Worker: pick cluster "extremes" (representatives) and count how many files each one covers.

    Relies on the module-global ``_shared_dataset`` so the dataset is not
    pickled per task when used with multiprocessing.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every cluster in parallel; returns a list of extremes lists."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicate files from ``dataset``, keeping one representative per cluster.

    Returns the filtered dataset and the duplicate clusters annotated with
    ``is_extreme`` / ``copies`` per element.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
664
1
"""Exact matrix inversion for 2x2 and 3x3 matrices using Decimal arithmetic."""
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the inverse of a 2x2 or 3x3 ``matrix``.

    Entries are converted to ``Decimal`` to avoid float round-off in the
    intermediate products; the result is returned as plain floats
    (with ``-0.0`` normalised to ``0.0`` via ``or 0.0``).

    Raises:
        ValueError: if the matrix is singular, or not 2x2 / 3x3.
    """
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
585
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


# NOTE(review): an obfuscation pass mangled this file badly: both classes are
# named `lowercase_` (the second shadows the first), `__init__` below repeats
# the same parameter name (a SyntaxError), every method is `__UpperCamelCase`
# (later defs shadow earlier ones), and locals are all assigned to `a__` while
# bodies reference the original names (`parent`, `model`, `config`, ...).
# `TimmBackboneModelTester` and the mixin bases (`lowercase__`) are unresolved.
# Confirm against the original `tests/models/timm_backbone/test_modeling_timm_backbone.py`.
class lowercase_ :
    """Helper that builds small TimmBackbone configs/inputs for the test case below."""

    def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
        # Store the test parameters; out_indices defaults to the last stage [4].
        a__ =parent
        a__ =out_indices if out_indices is not None else [4]
        a__ =stage_names
        a__ =out_features
        a__ =backbone
        a__ =batch_size
        a__ =image_size
        a__ =num_channels
        a__ =use_pretrained_backbone
        a__ =is_training

    def __UpperCamelCase ( self) -> Optional[Any]:
        """Create a random pixel_values tensor plus a matching config."""
        a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a__ =self.get_config()
        return config, pixel_values

    def __UpperCamelCase ( self) -> Tuple:
        """Build a TimmBackboneConfig from the stored test parameters."""
        return TimmBackboneConfig(
            image_size=self.image_size ,
            num_channels=self.num_channels ,
            out_features=self.out_features ,
            out_indices=self.out_indices ,
            stage_names=self.stage_names ,
            use_pretrained_backbone=self.use_pretrained_backbone ,
            backbone=self.backbone ,
        )

    def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
        """Run a forward pass and check the last feature map's shape (32px input -> 14x14? see NOTE)."""
        a__ =TimmBackbone(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        with torch.no_grad():
            a__ =model(lowercase_)
        # NOTE(review): `feature_map` vs the `feature_maps` attribute used below - verify.
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )

    def __UpperCamelCase ( self) -> str:
        """Return (config, inputs_dict) for the common-test machinery."""
        a__ =self.prepare_config_and_inputs()
        a__ , a__ =config_and_inputs
        a__ ={'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
    """TimmBackbone model tests (mixin-based common tests plus timm-specific checks)."""

    # (all_model_classes / pipeline mapping / feature flags - names mangled to `snake_case`)
    snake_case =(TimmBackbone,) if is_torch_available() else ()
    snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
    snake_case =False
    snake_case =False
    snake_case =False
    snake_case =False

    def __UpperCamelCase ( self) -> Optional[Any]:
        """setUp: create the model tester and a ConfigTester (no text modality)."""
        a__ =TimmBackboneModelTester(self)
        a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)

    def __UpperCamelCase ( self) -> Dict:
        """Exercise the standard config serialization round-trips."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __UpperCamelCase ( self) -> str:
        """Compare a timm-backed backbone against the equivalent transformers backbone."""
        a__ ='resnet18'
        a__ ='microsoft/resnet-18'
        a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
        a__ =AutoBackbone.from_pretrained(lowercase_)

        self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels , transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,))
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])

        a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
        a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
        self.assertEqual(timm_model.channels , transformers_model.channels)

    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
    def __UpperCamelCase ( self) -> int:
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
    def __UpperCamelCase ( self) -> List[str]:
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side')
    def __UpperCamelCase ( self) -> Any:
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def __UpperCamelCase ( self) -> Any:
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def __UpperCamelCase ( self) -> List[str]:
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
    def __UpperCamelCase ( self) -> Optional[int]:
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def __UpperCamelCase ( self) -> Union[str, Any]:
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def __UpperCamelCase ( self) -> Dict:
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def __UpperCamelCase ( self) -> List[Any]:
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def __UpperCamelCase ( self) -> List[str]:
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def __UpperCamelCase ( self) -> Union[str, Any]:
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
    def __UpperCamelCase ( self) -> int:
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.')
    def __UpperCamelCase ( self) -> str:
        pass

    @unittest.skip('Safetensors is not supported by timm.')
    def __UpperCamelCase ( self) -> Optional[int]:
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def __UpperCamelCase ( self) -> Optional[Any]:
        pass

    def __UpperCamelCase ( self) -> Any:
        """Check the forward signature starts with `pixel_values`."""
        a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a__ =model_class(lowercase_)
            a__ =inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a__ =[*signature.parameters.keys()]
            a__ =['pixel_values']
            self.assertListEqual(arg_names[:1] , lowercase_)

    def __UpperCamelCase ( self) -> Any:
        """Check gradients flow to hidden states (and attentions, when present)."""
        a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
        a__ =True
        a__ =self.has_attentions

        # no need to test all models as different heads yield the same functionality
        a__ =self.all_model_classes[0]
        a__ =model_class(lowercase_)
        model.to(lowercase_)

        a__ =self._prepare_for_class(lowercase_ , lowercase_)
        a__ =model(**lowercase_)
        a__ =outputs[0][-1]

        # Encoder-/Decoder-only models
        a__ =outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            a__ =outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=lowercase_)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def __UpperCamelCase ( self) -> List[str]:
        """Check feature maps / channels follow out_indices, default to last stage, and fresh-weight init works."""
        a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ =model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            a__ =model(**lowercase_)

            self.assertEqual(len(result.feature_maps) , len(config.out_indices))
            self.assertEqual(len(model.channels) , len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            a__ =copy.deepcopy(lowercase_)
            a__ =None
            a__ =model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            a__ =model(**lowercase_)

            self.assertEqual(len(result.feature_maps) , 1)
            self.assertEqual(len(model.channels) , 1)

            # Check backbone can be initialized with fresh weights
            a__ =copy.deepcopy(lowercase_)
            a__ =False
            a__ =model_class(lowercase_)
            model.to(lowercase_)
            model.eval()
            a__ =model(**lowercase_)
20
0
from ...configuration_utils import PretrainedConfig from ...utils import logging a : Optional[Any] = logging.get_logger(__name__) a : Optional[int] = { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json''' # See all FNet models at https://huggingface.co/models?filter=fnet } class lowerCamelCase_ ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase = "fnet" def __init__( self , snake_case_=3_2_0_0_0 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=4 , snake_case_=0.0_2 , snake_case_=1e-1_2 , snake_case_=False , snake_case_=5_1_2 , snake_case_=3 , snake_case_=1 , snake_case_=2 , **snake_case_ , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ ) __lowercase = vocab_size __lowercase = max_position_embeddings __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = initializer_range __lowercase = type_vocab_size __lowercase = layer_norm_eps __lowercase = use_tpu_fourier_optimizations __lowercase = tpu_short_seq_length
527
from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase = ["onnx"] def __init__( self , *snake_case_ , **snake_case_ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['''onnx'''] ) @classmethod def A ( cls , *snake_case_ , **snake_case_ ) -> int: '''simple docstring''' requires_backends(cls , ['''onnx'''] ) @classmethod def A ( cls , *snake_case_ , **snake_case_ ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''onnx'''] )
527
1
import qiskit def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->qiskit.result.counts.Counts: """simple docstring""" __magic_name__ : int = qiskit.Aer.get_backend('''aer_simulator''' ) # Create a Quantum Circuit acting on the q register __magic_name__ : Tuple = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # Map the quantum measurement to the classical bits circuit.measure([0], [0] ) # Execute the circuit on the simulator __magic_name__ : str = qiskit.execute(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
154
# NOTE(review): the five lines below are whitespace-mangled (many statements per
# physical line) and every method declares all parameters as `a__` — duplicate
# argument names are a SyntaxError, so the real parameter names need restoring
# before this can run. Tokens are kept verbatim; only comment lines are added.
# Chunk 1: imports, `make_batched` (wraps images/lists into list-of-videos or
# raises), and the start of a video image-processor class (defaults:
# shortest_edge 256, crop 224x224, bilinear resample).
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL a__ : Tuple = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(SCREAMING_SNAKE_CASE_ ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["pixel_values"] def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ): super().__init__(**a__ ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(a__ ,
# Chunk 2: remaining __init__ attribute assignments (IMAGENET mean/std
# defaults), then `resize` handling either a "shortest_edge" or explicit
# "height"/"width" size dict.
param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = offset UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ): UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) if "shortest_edge" in size: UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ ) elif "height" in size and "width" in size: UpperCAmelCase = (size['''height'''], size['''width''']) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" ) return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): UpperCAmelCase = get_size_dict(a__ ) if "height" not in size or "width" not in size: raise ValueError(f"Size must have 'height' and 'width' as keys.
# Chunk 3: `center_crop`, `rescale` (optionally offsetting by scale/2 so pixel
# values become signed), `normalize`, and argument validation for the
# per-image preprocessing entry point.
Got {size.keys()}" ) return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ ) def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ): UpperCAmelCase = image.astype(np.floataa ) if offset: UpperCAmelCase = image - (scale / 2) return rescale(a__ , scale=a__ , data_format=a__ , **a__ ) def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays.
# Chunk 4: per-image pipeline (resize -> center_crop -> rescale -> normalize
# -> channel-format), then the public `preprocess` resolving each flag from
# instance defaults.
UpperCAmelCase = to_numpy_array(a__ ) if do_resize: UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ ) if do_center_crop: UpperCAmelCase = self.center_crop(a__ , size=a__ ) if do_rescale: UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ ) if do_normalize: UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ ) UpperCAmelCase = to_channel_dimension_format(a__ , a__ ) return image def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = offset if offset is not None else self.offset UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) if not valid_images(a__ ): raise ValueError( '''Invalid image type.
# Chunk 5: input validation tail, batching via make_batched, nested
# per-frame/per-video preprocessing, and the BatchFeature("pixel_values") return.
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase = make_batched(a__ ) UpperCAmelCase = [ [ self._preprocess_image( image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , ) for img in video ] for video in videos ] UpperCAmelCase = {'''pixel_values''': videos} return BatchFeature(data=a__ , tensor_type=a__ )
51
0
"""simple docstring""" def _lowerCamelCase ( __a ): SCREAMING_SNAKE_CASE_ = current_set.copy() for row_index, row in enumerate(__a ): SCREAMING_SNAKE_CASE_ = row[0] for column_index, column in enumerate(__a ): if magnitude == 0: SCREAMING_SNAKE_CASE_ = column continue SCREAMING_SNAKE_CASE_ = column / magnitude # Subtract to cancel term SCREAMING_SNAKE_CASE_ = current_set[0] SCREAMING_SNAKE_CASE_ = [first_row] SCREAMING_SNAKE_CASE_ = current_set[1::] for row in current_set: SCREAMING_SNAKE_CASE_ = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__a ) continue for column_index in range(len(__a ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__a ) # Create next recursion iteration set if len(final_set[0] ) != 3: SCREAMING_SNAKE_CASE_ = final_set[0] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) SCREAMING_SNAKE_CASE_ = simplify(__a ) for i in range(len(__a ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, __a ) SCREAMING_SNAKE_CASE_ = resultant return final_set def _lowerCamelCase ( __a ): if len(__a ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) SCREAMING_SNAKE_CASE_ = len(__a ) + 1 if any(len(__a ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__a, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__a ) == 1: return [equations[0][-1] / equations[0][0]] SCREAMING_SNAKE_CASE_ = equations.copy() if any(0 in row for row in data_set ): SCREAMING_SNAKE_CASE_ = data_set.copy() SCREAMING_SNAKE_CASE_ = [] for row_index, row in enumerate(__a ): if 0 not in row: SCREAMING_SNAKE_CASE_ = data_set.pop(__a ) break if not full_row: raise 
ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, __a ) SCREAMING_SNAKE_CASE_ = data_set.copy() SCREAMING_SNAKE_CASE_ = simplify(__a ) SCREAMING_SNAKE_CASE_ = simplified[::-1] SCREAMING_SNAKE_CASE_ = [] for row in simplified: SCREAMING_SNAKE_CASE_ = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue SCREAMING_SNAKE_CASE_ = row.copy()[: len(__a ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__a ) == 0: solutions.append(0 ) continue SCREAMING_SNAKE_CASE_ = temp_row[1::] SCREAMING_SNAKE_CASE_ = temp_row[::-1] for column_index, column in enumerate(__a ): current_solution -= column * solutions[column_index] solutions.append(__a ) SCREAMING_SNAKE_CASE_ = [] for item in solutions: final.append(float(round(__a, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
628
"""simple docstring""" import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _lowerCamelCase ( __a ): # picklable for multiprocessing return x.sum() def _lowerCamelCase ( __a ): # picklable for multiprocessing return i + 1 @dataclass class snake_case : UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 class snake_case ( __lowercase ): def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = [1, 2] SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2} SCREAMING_SNAKE_CASE_ = {'''a''': [1, 2], '''b''': [3, 4]} SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 1}, '''b''': 2} SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = [2, 3] SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3} SCREAMING_SNAKE_CASE_ = {'''a''': [2, 3], '''b''': [4, 5]} SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 2}, '''b''': 3} SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ 
) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = 2 self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )} SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 0, '''c''': 2} SCREAMING_SNAKE_CASE_ = { '''a''': np.eye(2 ).astype(SCREAMING_SNAKE_CASE_ ), '''b''': np.zeros(3 ).astype(SCREAMING_SNAKE_CASE_ ), '''c''': np.ones(2 ).astype(SCREAMING_SNAKE_CASE_ ), } self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in 
expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): # can't pickle a local lambda map_nested(lambda SCREAMING_SNAKE_CASE_ : x + 1 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2} SCREAMING_SNAKE_CASE_ = {'''a''': 3, '''b''': 4} SCREAMING_SNAKE_CASE_ = {'''a''': 5, '''b''': 6} SCREAMING_SNAKE_CASE_ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ ) def _lowercase (self ): """simple docstring""" class snake_case : UpperCAmelCase__ = '''bar''' SCREAMING_SNAKE_CASE_ = Foo() self.assertEqual(foo.my_attr , '''bar''' ) with temporary_assignment(SCREAMING_SNAKE_CASE_ , '''my_attr''' , '''BAR''' ): self.assertEqual(foo.my_attr , '''BAR''' ) self.assertEqual(foo.my_attr , '''bar''' ) @pytest.mark.parametrize( '''iterable_length, num_proc, expected_num_proc''', [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def _lowerCamelCase ( __a, __a, __a ): with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch( '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool: SCREAMING_SNAKE_CASE_ = {F'{i}': i for i in range(__a )} SCREAMING_SNAKE_CASE_ = map_nested(lambda __a : x + 10, __a, num_proc=__a, parallel_min_length=16 ) if expected_num_proc == 1: assert 
mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class snake_case ( __lowercase ): @require_tf def _lowercase (self ): """simple docstring""" import tensorflow as tf from tensorflow.keras import layers SCREAMING_SNAKE_CASE_ = layers.Dense(2 ) def gen_random_output(): SCREAMING_SNAKE_CASE_ = tf.random.uniform((1, 3) ) return model(SCREAMING_SNAKE_CASE_ ).numpy() with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ = gen_random_output() with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ = gen_random_output() SCREAMING_SNAKE_CASE_ = gen_random_output() np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def _lowercase (self ): """simple docstring""" import torch def gen_random_output(): SCREAMING_SNAKE_CASE_ = torch.nn.Linear(3 , 2 ) SCREAMING_SNAKE_CASE_ = torch.rand(1 , 3 ) return model(SCREAMING_SNAKE_CASE_ ).detach().numpy() with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ = gen_random_output() with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ = gen_random_output() SCREAMING_SNAKE_CASE_ = gen_random_output() np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def _lowercase (self ): """simple docstring""" def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): SCREAMING_SNAKE_CASE_ = gen_random_output() with temp_seed(42 ): SCREAMING_SNAKE_CASE_ = gen_random_output() SCREAMING_SNAKE_CASE_ = gen_random_output() np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('''input_data''', [{}] ) def 
_lowerCamelCase ( __a ): SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).data assert output_data == input_data @pytest.mark.parametrize( '''data, expected_output''', [ ({}, []), ([], []), ('''foo''', ['''foo''']), (['''foo''', '''bar'''], ['''foo''', '''bar''']), ([['''foo''', '''bar''']], ['''foo''', '''bar''']), ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']), ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']), ({'''a''': 1, '''b''': 2}, [1, 2]), ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]), ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]), ], ) def _lowerCamelCase ( __a, __a ): SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).flatten() assert output == expected_output def _lowerCamelCase ( ): SCREAMING_SNAKE_CASE_ = A(x=1, y='''foobar''' ) SCREAMING_SNAKE_CASE_ = {'''x''': 1, '''y''': '''foobar'''} assert asdict(__a ) == expected_output SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]} SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]} assert asdict(__a ) == expected_output with pytest.raises(__a ): asdict([1, A(x=10, y='''foo''' )] ) def _lowerCamelCase ( __a ): return text.split() def _lowerCamelCase ( __a ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _lowerCamelCase ( ): with Pool(2 ) as pool: SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert 
out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(__a ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(__a ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: SCREAMING_SNAKE_CASE_ = [] for yield_time, content in iflatmap_unordered( __a, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(__a ) assert out.count('''a''' ) == 2 assert out.count('''b''' ) == 2 assert len(__a ) == 4
628
1
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a :Optional[Any] = logging.get_logger(__name__) class __a : '''simple docstring''' def __init__( self , _a = None , _a = None , _a=None , _a=None ) -> Dict: """simple docstring""" if not conversation_id: SCREAMING_SNAKE_CASE__ : Optional[int] = uuid.uuida() if past_user_inputs is None: SCREAMING_SNAKE_CASE__ : Dict = [] if generated_responses is None: SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ : Optional[int] = conversation_id SCREAMING_SNAKE_CASE__ : str = past_user_inputs SCREAMING_SNAKE_CASE__ : int = generated_responses SCREAMING_SNAKE_CASE__ : List[str] = text def __eq__( self , _a ) -> List[Any]: """simple docstring""" if not isinstance(_lowerCamelCase , _lowerCamelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def _a ( self , _a , _a = False ) -> Optional[Any]: """simple docstring""" if self.new_user_input: if overwrite: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' f'''with: "{text}".''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = text else: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' f'''ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input''' ) else: SCREAMING_SNAKE_CASE__ : Dict = text def _a ( self ) -> List[str]: """simple docstring""" if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) SCREAMING_SNAKE_CASE__ : int = None def _a ( self , _a ) -> Dict: """simple docstring""" self.generated_responses.append(_lowerCamelCase ) def _a ( self ) -> Tuple: """simple docstring""" for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = f'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): SCREAMING_SNAKE_CASE__ : Tuple = """user""" if is_user else """bot""" output += f'''{name} >> {text} \n''' return output @add_end_docstrings( A_ , R""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __a (A_): '''simple docstring''' def __init__( self , *_a , **_a ) -> Dict: """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) if self.tokenizer.pad_token_id is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.eos_token def _a ( self , _a=None , _a=None , _a=None , **_a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = {} SCREAMING_SNAKE_CASE__ : Tuple = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} if min_length_for_response is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = min_length_for_response if minimum_tokens is not None: SCREAMING_SNAKE_CASE__ : Dict = minimum_tokens if "max_length" in generate_kwargs: SCREAMING_SNAKE_CASE__ : Any = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: SCREAMING_SNAKE_CASE__ : Any = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(_lowerCamelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , _a , _a=0 , **_a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowerCamelCase , num_workers=_lowerCamelCase , **_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) == 1: return outputs[0] return outputs def _a ( self , _a , _a=32 ) -> Any: """simple docstring""" if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. 
''' """Add user inputs with the conversation\'s `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): SCREAMING_SNAKE_CASE__ : Any = self.tokenizer._build_conversation_input_ids(_lowerCamelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version SCREAMING_SNAKE_CASE__ : List[Any] = self._legacy_parse_and_tokenize(_lowerCamelCase ) if self.framework == "pt": SCREAMING_SNAKE_CASE__ : Tuple = torch.LongTensor([input_ids] ) elif self.framework == "tf": SCREAMING_SNAKE_CASE__ : int = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def _a ( self , _a , _a=10 , **_a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = generate_kwargs.get("""max_length""" , self.model.config.max_length ) SCREAMING_SNAKE_CASE__ : List[Any] = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(f'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) SCREAMING_SNAKE_CASE__ : Tuple = max_length - minimum_tokens SCREAMING_SNAKE_CASE__ : str = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: SCREAMING_SNAKE_CASE__ : List[Any] = model_inputs["""attention_mask"""][:, -trim:] SCREAMING_SNAKE_CASE__ : List[Any] = model_inputs.pop("""conversation""" ) SCREAMING_SNAKE_CASE__ : Dict = max_length SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model.generate(**_lowerCamelCase , **_lowerCamelCase ) if self.model.config.is_encoder_decoder: SCREAMING_SNAKE_CASE__ : Any = 1 else: SCREAMING_SNAKE_CASE__ : str = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def _a ( self , _a , _a=True ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = model_outputs["""output_ids"""] SCREAMING_SNAKE_CASE__ : int = self.tokenizer.decode( output_ids[0] , skip_special_tokens=_lowerCamelCase , 
clean_up_tokenization_spaces=_lowerCamelCase , ) SCREAMING_SNAKE_CASE__ : int = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(_lowerCamelCase ) return conversation def _a ( self , _a ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.eos_token_id SCREAMING_SNAKE_CASE__ : List[Any] = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) ) if len(_lowerCamelCase ) > self.tokenizer.model_max_length: SCREAMING_SNAKE_CASE__ : str = input_ids[-self.tokenizer.model_max_length :] return input_ids
680
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _UpperCAmelCase ( __lowerCamelCase : str = "isbn/0140328726" ) -> dict: _snake_case = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: _snake_case = f'''{olid} is not a valid Open Library olid''' raise ValueError(__lowerCamelCase ) return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json() def _UpperCAmelCase ( __lowerCamelCase : dict ) -> dict: _snake_case = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } _snake_case = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} _snake_case = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] _snake_case = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = ''', '''.join(__lowerCamelCase ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: UpperCAmelCase__ = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.") continue print(F"\nSearching Open Library for ISBN: {isbn}...\n") try: UpperCAmelCase__ = summarize_book(get_openlibrary_data(F"isbn/{isbn}")) print('\n'.join(F"{key}: {value}" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"Sorry, there are no results for ISBN: {isbn}.")
224
0
"""InstructBLIP model configuration (vision encoder, Q-Former, and composite config)."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder (a ViT-style tower)."""

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former (BERT-like with cross-attention)."""

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    """Composite configuration tying together vision, Q-Former, and language-model configs."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        # Default the language model to OPT when the text config carries no explicit type.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends into the vision tower, so its encoder width must match.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Instantiate a composite config from the three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding each nested sub-config to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
140
"""Convert a PyTorch BERT checkpoint to a TensorFlow 1.x checkpoint."""
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: "BertModel", ckpt_dir: str, model_name: str):
    """Write `model`'s weights as a TF checkpoint named `<model_name>.ckpt` in `ckpt_dir`.

    Dense/attention weight matrices are transposed (PyTorch stores them as
    (out, in); TF expects (in, out)), and parameter names are remapped to the
    original TF BERT naming scheme.
    """
    # Weights whose matrices must be transposed for TF.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # Ordered name substitutions from PyTorch naming to TF naming.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str) -> str:
        # Apply each substitution in order, then prefix with the TF scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: "tf.Session"):
        # Create a zero-initialized TF variable matching the tensor's shape/dtype.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            # Sanity-check the round trip before saving.
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and convert it to TF."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
140
1
"""Entry point for the `datasets-cli` command-line tool."""
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Pair leftover `--key value` tokens from argparse into a kwargs dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    """Register every sub-command, parse the CLI arguments, and run the chosen command."""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown = parser.parse_known_args()
    if not hasattr(args, "func"):
        # No sub-command given: show usage and bail out.
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
5
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging A_ = ( "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py" ) A_ = logging.get_logger(__name__) # pylint: disable=invalid-name def _UpperCamelCase ( ): UpperCamelCase_ ="https://pypi.org/pypi/diffusers/json" UpperCamelCase_ =json.loads(request.urlopen(A ).read() )["releases"].keys() return sorted(A , key=lambda A : version.Version(A ) ) def _UpperCamelCase ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(A ) os.makedirs(A , exist_ok=A ) UpperCamelCase_ =Path(A ) / "__init__.py" if not init_path.exists(): init_path.touch() def _UpperCamelCase ( A ): init_hf_modules() UpperCamelCase_ =Path(A ) / name # If the parent module does not exist yet, recursively create it. 
if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(A , exist_ok=A ) UpperCamelCase_ =dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def _UpperCamelCase ( A ): with open(A , "r" , encoding="utf-8" ) as f: UpperCamelCase_ =f.read() # Imports of the form `import .xxx` UpperCamelCase_ =re.findall("^\s*import\s+\.(\S+)\s*$" , A , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , A , flags=re.MULTILINE ) # Unique-ify return list(set(A ) ) def _UpperCamelCase ( A ): UpperCamelCase_ =False UpperCamelCase_ =[module_file] UpperCamelCase_ =[] # Let's recurse through all relative imports while not no_change: UpperCamelCase_ =[] for f in files_to_check: new_imports.extend(get_relative_imports(A ) ) UpperCamelCase_ =Path(A ).parent UpperCamelCase_ =[str(module_path / m ) for m in new_imports] UpperCamelCase_ =[f for f in new_import_files if f not in all_relative_imports] UpperCamelCase_ =[f"""{f}.py""" for f in new_import_files] UpperCamelCase_ =len(A ) == 0 all_relative_imports.extend(A ) return all_relative_imports def _UpperCamelCase ( A ): with open(A , "r" , encoding="utf-8" ) as f: UpperCamelCase_ =f.read() # Imports of the form `import xxx` UpperCamelCase_ =re.findall("^\s*import\s+(\S+)\s*$" , A , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , A , flags=re.MULTILINE ) # Only keep the top-level module UpperCamelCase_ =[imp.split("." )[0] for imp in imports if not imp.startswith("." )] # Unique-ify and test we got them all UpperCamelCase_ =list(set(A ) ) UpperCamelCase_ =[] for imp in imports: try: importlib.import_module(A ) except ImportError: missing_packages.append(A ) if len(A ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " f"""{", ".join(A )}. 
Run `pip install {" ".join(A )}`""" ) return get_relative_imports(A ) def _UpperCamelCase ( A , A ): UpperCamelCase_ =module_path.replace(os.path.sep , "." ) UpperCamelCase_ =importlib.import_module(A ) if class_name is None: return find_pipeline_class(A ) return getattr(A , A ) def _UpperCamelCase ( A ): from ..pipelines import DiffusionPipeline UpperCamelCase_ =dict(inspect.getmembers(A , inspect.isclass ) ) UpperCamelCase_ =None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , A ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) UpperCamelCase_ =cls return pipeline_class def _UpperCamelCase ( A , A , A = None , A = False , A = False , A = None , A = None , A = None , A = False , ): UpperCamelCase_ =str(A ) UpperCamelCase_ =os.path.join(A , A ) if os.path.isfile(A ): UpperCamelCase_ =module_file_or_url UpperCamelCase_ ="local" elif pretrained_model_name_or_path.count("/" ) == 0: UpperCamelCase_ =get_diffusers_versions() # cut ".dev0" UpperCamelCase_ ="v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: UpperCamelCase_ =latest_version if latest_version[1:] in available_versions else "main" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: UpperCamelCase_ =f"""v{revision}""" elif revision == "main": UpperCamelCase_ =revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {", ".join(available_versions + ["main"] )}.""" ) # community pipeline on GitHub UpperCamelCase_ =COMMUNITY_PIPELINES_URL.format(revision=A , pipeline=A ) try: UpperCamelCase_ =cached_download( A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , ) UpperCamelCase_ ="git" UpperCamelCase_ =pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached UpperCamelCase_ =hf_hub_download( A , A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , ) UpperCamelCase_ =os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment UpperCamelCase_ =check_imports(A ) # Now we move the module inside our cached dynamic modules. UpperCamelCase_ =DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(A ) UpperCamelCase_ =Path(A ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(A , submodule_path / module_file ) for module_needed in modules_needed: UpperCamelCase_ =f"""{module_needed}.py""" shutil.copy(os.path.join(A , A ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. 
if isinstance(A , A ): UpperCamelCase_ =use_auth_token elif use_auth_token is True: UpperCamelCase_ =HfFolder.get_token() else: UpperCamelCase_ =None UpperCamelCase_ =model_info(A , revision=A , token=A ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. UpperCamelCase_ =submodule_path / commit_hash UpperCamelCase_ =full_submodule + os.path.sep + commit_hash create_dynamic_module(A ) if not (submodule_path / module_file).exists(): shutil.copy(A , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( A , f"""{module_needed}.py""" , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , ) return os.path.join(A , A ) def _UpperCamelCase ( A , A , A = None , A = None , A = False , A = False , A = None , A = None , A = None , A = False , **A , ): UpperCamelCase_ =get_cached_module_file( A , A , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , ) return get_class_in_module(A , final_module.replace(".py" , "" ) )
391
0
"""simple docstring""" def __snake_case ( UpperCamelCase ) -> str: """simple docstring""" if not all(char in '''01''' for char in bin_string ): raise ValueError('''Non-binary value was passed to the function''' ) if not bin_string: raise ValueError('''Empty string was passed to the function''' ) a__ = '''''' while len(UpperCamelCase ) % 3 != 0: a__ = '''0''' + bin_string a__ = [ bin_string[index : index + 3] for index in range(len(UpperCamelCase ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: a__ = 0 for index, val in enumerate(UpperCamelCase ): oct_val += int(2 ** (2 - index) * int(UpperCamelCase ) ) oct_string += str(UpperCamelCase ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
158
"""simple docstring""" import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self :List[str] ) -> List[Any]: '''simple docstring''' a__ = logging.get_logger() # the current default level is logging.WARNING a__ = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(__magic_name__ ) def _UpperCamelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' a__ = logging.get_verbosity() a__ = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) a__ = '''Testing 1, 2, 3''' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(__magic_name__ ) as cl: logger.warning(__magic_name__ ) self.assertEqual(cl.out , msg + '''\n''' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(__magic_name__ ) as cl: logger.warning(__magic_name__ ) self.assertEqual(cl.out , '''''' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(__magic_name__ ) as cl: logger.warning(__magic_name__ ) self.assertEqual(cl.out , msg + '''\n''' ) # restore to 
the original level logging.set_verbosity(__magic_name__ ) @mockenv(TRANSFORMERS_VERBOSITY='''error''' ) def _UpperCamelCase ( self :int ) -> Tuple: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() # this action activates the env var a__ = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) a__ = os.getenv('''TRANSFORMERS_VERBOSITY''' , __magic_name__ ) a__ = logging.log_levels[env_level_str] a__ = logging.get_verbosity() self.assertEqual( __magic_name__ , __magic_name__ , F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , ) # restore to the original level a__ = '''''' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' ) def _UpperCamelCase ( self :Tuple ) -> Tuple: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() a__ = logging.logging.getLogger() with CaptureLogger(__magic_name__ ) as cl: # this action activates the env var logging.get_logger('''transformers.models.bart.tokenization_bart''' ) self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out ) # no need to restore as nothing was changed def _UpperCamelCase ( self :Any ) -> Optional[Any]: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() a__ = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) a__ = '''Testing 1, 2, 3''' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ): # nothing should be logged as env var disables this method with CaptureLogger(__magic_name__ ) as cl: logger.warning_advice(__magic_name__ ) self.assertEqual(cl.out , '''''' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(__magic_name__ ) as cl: logger.warning_advice(__magic_name__ ) self.assertEqual(cl.out , msg + '''\n''' ) def __snake_case ( ) -> Optional[int]: """simple 
docstring""" disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
158
1
"""Tests for `BarkProcessor` (tokenization plus speaker-embedding voice presets)."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class _snake_case(unittest.TestCase):
    """Exercise save/load round-trips, voice presets, and tokenizer parity."""

    def setUp(self):
        # Shared fixtures for every test; tmpdirname is removed in tearDown.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Load the checkpoint's tokenizer, forwarding any overrides."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """A saved-then-reloaded processor must keep the same tokenizer vocab."""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        """Saving with speaker embeddings and reloading with kwargs keeps the vocab."""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        """Voice presets must round-trip whether passed as a dict, npz file, or hub name."""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        """The processor's encoding must match the bare tokenizer's padded encoding."""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
12
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

# File names expected inside a pretrained tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer (backed by the HuggingFace *tokenizers* library).

    At runtime the backend uses a custom Jieba-based pre-tokenizer.  Because
    that custom pre-tokenizer is not serializable, it is temporarily replaced
    by a plain ``BertPreTokenizer`` while pickling (``__getstate__``) and while
    saving (``save_pretrained``), and restored on unpickling (``__setstate__``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested casing /
        # accent-stripping options (the serialized tokenizer may differ).
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer cannot be pickled; substitute a
        # serializable BertPreTokenizer for the duration of pickling.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore the custom Jieba pre-tokenizer after unpickling.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files and return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Same serialization caveat as __getstate__: drop the custom Jieba
        # pre-tokenizer before the backend tokenizer is written to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
380
0
'''Unit tests for transformers.utils.backbone_utils.'''
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class _lowerCAmelCase(unittest.TestCase):
    """Behavior checks for out_features/out_indices alignment and verification.

    NOTE(review): the three methods share the name ``UpperCAmelCase_`` (later
    definitions shadow earlier ones) and several call sites pass the undefined
    placeholder ``_lowerCamelCase`` where the original presumably passed
    ``stage_names`` / ``None`` / the expected exception type -- mechanical
    renaming artifact; confirm against the upstream test file.
    """

    def UpperCAmelCase_(self) -> List[Any]:
        # Alignment of out_features/out_indices against a 3-stage backbone.
        A_ : List[str] = ["""a""", """b""", """c"""]

        # Defaults to last layer if both are None
        A_ , A_ : Tuple = get_aligned_output_features_output_indices(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase)
        self.assertEqual(_lowerCamelCase, ["""c"""])
        self.assertEqual(_lowerCamelCase, [2])

        # Out indices set to match out features
        A_ , A_ : Optional[int] = get_aligned_output_features_output_indices(["""a""", """c"""], _lowerCamelCase, _lowerCamelCase)
        self.assertEqual(_lowerCamelCase, ["""a""", """c"""])
        self.assertEqual(_lowerCamelCase, [0, 2])

        # Out features set to match out indices
        A_ , A_ : str = get_aligned_output_features_output_indices(_lowerCamelCase, [0, 2], _lowerCamelCase)
        self.assertEqual(_lowerCamelCase, ["""a""", """c"""])
        self.assertEqual(_lowerCamelCase, [0, 2])

        # Out features selected from negative indices
        A_ , A_ : Any = get_aligned_output_features_output_indices(_lowerCamelCase, [-3, -1], _lowerCamelCase)
        self.assertEqual(_lowerCamelCase, ["""a""", """c"""])
        self.assertEqual(_lowerCamelCase, [-3, -1])

    def UpperCAmelCase_(self) -> Dict:
        # verify_out_features_out_indices must raise on each malformed combination.
        # Stage names must be set
        with self.assertRaises(_lowerCamelCase):
            verify_out_features_out_indices(["""a""", """b"""], (0, 1), _lowerCamelCase)

        # Out features must be a list
        with self.assertRaises(_lowerCamelCase):
            verify_out_features_out_indices(("""a""", """b"""), (0, 1), ["""a""", """b"""])

        # Out features must be a subset of stage names
        with self.assertRaises(_lowerCamelCase):
            verify_out_features_out_indices(["""a""", """b"""], (0, 1), ["""a"""])

        # Out indices must be a list or tuple
        with self.assertRaises(_lowerCamelCase):
            verify_out_features_out_indices(_lowerCamelCase, 0, ["""a""", """b"""])

        # Out indices must be a subset of stage names
        with self.assertRaises(_lowerCamelCase):
            verify_out_features_out_indices(_lowerCamelCase, (0, 1), ["""a"""])

        # Out features and out indices must be the same length
        with self.assertRaises(_lowerCamelCase):
            verify_out_features_out_indices(["""a""", """b"""], (0,), ["""a""", """b""", """c"""])

        # Out features should match out indices
        with self.assertRaises(_lowerCamelCase):
            verify_out_features_out_indices(["""a""", """b"""], (0, 2), ["""a""", """b""", """c"""])

        # Out features and out indices should be in order
        with self.assertRaises(_lowerCamelCase):
            verify_out_features_out_indices(["""b""", """a"""], (0, 1), ["""a""", """b"""])

        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""], (0, 1, -1), ["""a""", """b""", """c""", """d"""])

    def UpperCAmelCase_(self) -> Optional[Any]:
        # BackboneMixin must keep out_features and out_indices in sync.
        A_ : Tuple = BackboneMixin()

        # NOTE(review): the next three assignments presumably target
        # backbone.stage_names / backbone._out_features / backbone._out_indices
        # -- renaming artifact, confirm.
        A_ : str = ["""a""", """b""", """c"""]
        A_ : List[Any] = ["""a""", """c"""]
        A_ : Optional[int] = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["""a""", """c"""])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        A_ : int = ["""a""", """b"""]
        self.assertEqual(backbone.out_features, ["""a""", """b"""])
        self.assertEqual(backbone.out_indices, [0, 1])

        A_ : Union[str, Any] = [-3, -1]
        self.assertEqual(backbone.out_features, ["""a""", """c"""])
        self.assertEqual(backbone.out_indices, [-3, -1])
385
'''Image processor for Pix2Struct: renders optional header text, resizes, and
flattens images into padded patch sequences with row/col position features.

NOTE(review): this module carries mechanical renaming artifacts -- the four
helper functions are all named ``UpperCAmelCase`` (later defs shadow earlier,
so the internal calls to ``_check_torch_version`` / ``torch_extract_patches`` /
``render_text`` / ``render_header`` resolve to names that no longer exist),
several ``def`` headers repeat a placeholder parameter name (a SyntaxError),
and many locals are bound to throwaway names (``A_``) instead of the names the
following statements read.  Confirm against the upstream Pix2Struct
image-processing module before running.
'''
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    UpperCamelCase__ : Optional[int] = False

UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
# Hub repo that hosts the fallback font used when rendering header text.
UpperCamelCase__ : Any = 'ybelkada/fonts'


def UpperCAmelCase() -> List[Any]:
    """Raise if torch is present but older than the 1.11 minimum this processor needs."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            """Pix2StructImageProcessor. Please upgrade torch."""
        )


def UpperCAmelCase(a_, a_, a_) -> Tuple:
    """Extract non-overlapping (patch_height x patch_width) patches from a CHW tensor.

    Returns a [1, rows, cols, patch_h * patch_w * channels] tensor.
    """
    requires_backends(a_, ["""torch"""])
    _check_torch_version()

    A_ : List[Any] = image_tensor.unsqueeze(0)
    # unfold produces one column per patch; reshape/permute back to a grid.
    A_ : str = torch.nn.functional.unfold(a_, (patch_height, patch_width), stride=(patch_height, patch_width))
    A_ : int = patches.reshape(image_tensor.size(0), image_tensor.size(1), a_, a_, -1)
    A_ : Dict = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def UpperCAmelCase(
    a_,
    a_=3_6,
    a_="black",
    a_="white",
    a_=5,
    a_=5,
    a_=5,
    a_=5,
    a_=None,
    a_=None,
) -> Image.Image:
    """Render wrapped text onto a fresh PIL image (used for VQA header text)."""
    requires_backends(a_, """vision""")
    # Add new lines so that each line is no more than 80 characters.
    A_ : List[str] = textwrap.TextWrapper(width=8_0)
    A_ : str = wrapper.wrap(text=a_)
    A_ : Dict = """\n""".join(a_)

    # Font precedence: raw bytes > explicit path > hub-downloaded Arial.
    if font_bytes is not None and font_path is None:
        A_ : Any = io.BytesIO(a_)
    elif font_path is not None:
        A_ : Optional[int] = font_path
    else:
        A_ : int = hf_hub_download(a_, """Arial.TTF""")
    A_ : List[Any] = ImageFont.truetype(a_, encoding="""UTF-8""", size=a_)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    A_ : int = ImageDraw.Draw(Image.new("""RGB""", (1, 1), a_))
    A_ , A_ , A_ , A_ : Optional[Any] = temp_draw.textbbox((0, 0), a_, a_)

    # Create the actual image with a bit of padding around the text.
    A_ : str = text_width + left_padding + right_padding
    A_ : List[str] = text_height + top_padding + bottom_padding
    A_ : Optional[Any] = Image.new("""RGB""", (image_width, image_height), a_)
    A_ : Union[str, Any] = ImageDraw.Draw(a_)
    draw.text(xy=(left_padding, top_padding), text=a_, fill=a_, font=a_)
    return image


def UpperCAmelCase(a_, a_, **a_) -> List[Any]:
    """Render header text and stack it above the image, matched to a common width."""
    requires_backends(a_, """vision""")

    # Convert to PIL image if necessary
    A_ : Union[str, Any] = to_pil_image(a_)

    A_ : Tuple = render_text(a_, **a_)
    A_ : int = max(header_image.width, image.width)

    A_ : Union[str, Any] = int(image.height * (new_width / image.width))
    A_ : Dict = int(header_image.height * (new_width / header_image.width))

    A_ : List[Any] = Image.new("""RGB""", (new_width, new_height + new_header_height), """white""")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    A_ : Tuple = to_numpy_array(a_)

    if infer_channel_dimension_format(a_) == ChannelDimension.LAST:
        A_ : Union[str, Any] = to_channel_dimension_format(a_, ChannelDimension.LAST)

    return new_image


class _lowerCAmelCase(__A):
    """Pix2Struct image processor; emits ``flattened_patches`` + ``attention_mask``."""

    # Model-input name produced by preprocess().
    lowerCamelCase = ['''flattened_patches''']

    def __init__(
        self,
        _lowerCamelCase=True,
        _lowerCamelCase=True,
        _lowerCamelCase=None,
        _lowerCamelCase=2048,
        _lowerCamelCase=False,
        **_lowerCamelCase,
    ) -> None:
        super().__init__(**_lowerCamelCase)
        A_ : List[str] = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
        A_ : Union[str, Any] = do_normalize
        A_ : Any = do_convert_rgb
        A_ : int = max_patches
        A_ : Dict = is_vqa

    def UpperCAmelCase_(self, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, **_lowerCamelCase) -> np.ndarray:
        """Resize so about ``max_patches`` patches fit, extract them, prepend
        1-based row/col ids, and zero-pad to exactly ``max_patches`` rows."""
        requires_backends(self.extract_flattened_patches, """torch""")
        _check_torch_version()

        # convert to torch
        A_ : List[str] = to_channel_dimension_format(_lowerCamelCase, ChannelDimension.FIRST)
        A_ : Union[str, Any] = torch.from_numpy(_lowerCamelCase)

        A_ , A_ : Optional[int] = patch_size["""height"""], patch_size["""width"""]
        A_ , A_ : List[Any] = get_image_size(_lowerCamelCase)

        # maximize scale s.t. rows * cols stays within max_patches.
        A_ : Union[str, Any] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        A_ : List[Any] = max(min(math.floor(scale * image_height / patch_height), _lowerCamelCase), 1)
        A_ : int = max(min(math.floor(scale * image_width / patch_width), _lowerCamelCase), 1)
        A_ : Optional[Any] = max(num_feasible_rows * patch_height, 1)
        A_ : Any = max(num_feasible_cols * patch_width, 1)

        A_ : Any = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="""bilinear""",
            align_corners=_lowerCamelCase,
            antialias=_lowerCamelCase,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        A_ : str = torch_extract_patches(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase)

        A_ : int = patches.shape
        A_ : Optional[Any] = patches_shape[1]
        A_ : Optional[int] = patches_shape[2]
        A_ : Any = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        A_ : Any = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        A_ : Union[str, Any] = torch.arange(_lowerCamelCase).reshape([rows, 1]).repeat(1, _lowerCamelCase).reshape([rows * columns, 1])
        A_ : Optional[Any] = torch.arange(_lowerCamelCase).reshape([1, columns]).repeat(_lowerCamelCase, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        A_ : str = row_ids.to(torch.floataa)
        A_ : Optional[int] = col_ids.to(torch.floataa)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        A_ : Tuple = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        A_ : List[str] = torch.nn.functional.pad(_lowerCamelCase, [0, 0, 0, max_patches - (rows * columns)]).float()

        A_ : Any = to_numpy_array(_lowerCamelCase)

        return result

    def UpperCAmelCase_(self, _lowerCamelCase, _lowerCamelCase=None, **_lowerCamelCase) -> np.ndarray:
        """Per-image standardization (subtract mean, divide by clamped std)."""
        if image.dtype == np.uinta:
            A_ : Union[str, Any] = image.astype(np.floataa)

        # take mean across the whole `image`
        A_ : str = np.mean(_lowerCamelCase)
        A_ : Union[str, Any] = np.std(_lowerCamelCase)
        # Clamp std away from zero so flat images don't divide by ~0.
        A_ : List[str] = max(_lowerCamelCase, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(_lowerCamelCase, mean=_lowerCamelCase, std=_lowerCamelCase, **_lowerCamelCase)

    def UpperCAmelCase_(
        self,
        _lowerCamelCase,
        _lowerCamelCase=None,
        _lowerCamelCase=None,
        _lowerCamelCase=None,
        _lowerCamelCase=None,
        _lowerCamelCase=None,
        _lowerCamelCase=None,
        _lowerCamelCase=ChannelDimension.FIRST,
        **_lowerCamelCase,
    ) -> ImageInput:
        """Full pipeline: RGB-convert, (VQA) render header, normalize, patchify,
        and pack into a BatchFeature with an attention mask."""
        A_ : Dict = do_normalize if do_normalize is not None else self.do_normalize
        A_ : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        A_ : Optional[Any] = patch_size if patch_size is not None else self.patch_size
        A_ : Union[str, Any] = max_patches if max_patches is not None else self.max_patches
        A_ : Dict = self.is_vqa

        if kwargs.get("""data_format""", _lowerCamelCase) is not None:
            raise ValueError("""data_format is not an accepted input as the outputs are """)

        A_ : int = make_list_of_images(_lowerCamelCase)

        if not valid_images(_lowerCamelCase):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray."""
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            A_ : Tuple = [convert_to_rgb(_lowerCamelCase) for image in images]

        # All transformations expect numpy arrays.
        A_ : Dict = [to_numpy_array(_lowerCamelCase) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("""A header text must be provided for VQA models.""")

            A_ : List[str] = kwargs.pop("""font_bytes""", _lowerCamelCase)
            A_ : Optional[int] = kwargs.pop("""font_path""", _lowerCamelCase)

            if isinstance(_lowerCamelCase, _lowerCamelCase):
                A_ : List[str] = [header_text] * len(_lowerCamelCase)

            A_ : Dict = [
                render_header(_lowerCamelCase, header_text[i], font_bytes=_lowerCamelCase, font_path=_lowerCamelCase)
                for i, image in enumerate(_lowerCamelCase)
            ]

        if do_normalize:
            A_ : str = [self.normalize(image=_lowerCamelCase) for image in images]

        # convert to torch tensor and permute
        A_ : Union[str, Any] = [
            self.extract_flattened_patches(image=_lowerCamelCase, max_patches=_lowerCamelCase, patch_size=_lowerCamelCase)
            for image in images
        ]

        # create attention mask in numpy
        A_ : Optional[Any] = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images]

        A_ : Union[str, Any] = BatchFeature(
            data={"""flattened_patches""": images, """attention_mask""": attention_masks}, tensor_type=_lowerCamelCase
        )

        return encoded_outputs
385
1
'''Fast NLLB tokenizer (sentencepiece-backed, tokenizers library).

NOTE(review): this module carries mechanical renaming artifacts -- the class
and base-class names are placeholders, the four module constants are all
rebound to ``_UpperCAmelCase`` while the class body reads VOCAB_FILES_NAMES /
PRETRAINED_* / FAIRSEQ_LANGUAGE_CODES, every method is named ``_A`` (so the
``@src_lang.setter`` decorator references an undefined name and later defs
shadow earlier ones), and locals are bound to ``lowercase`` instead of the
names the following statements read.  Confirm against the upstream NLLB fast
tokenizer before running.
'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    _UpperCAmelCase : List[Any] = None

_UpperCAmelCase : int = logging.get_logger(__name__)

_UpperCAmelCase : Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}

_UpperCAmelCase : Optional[int] = {
    '''vocab_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
        ),
    },
}

_UpperCAmelCase : List[str] = {
    '''facebook/nllb-large-en-ro''': 10_24,
    '''facebook/nllb-200-distilled-600M''': 10_24,
}

# The 202 FAIRSEQ language codes NLLB-200 supports, added as special tokens.
# fmt: off
_UpperCAmelCase : Dict = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
# fmt: on


class __magic_name__(__SCREAMING_SNAKE_CASE):
    """NLLB fast tokenizer with per-language source/target special tokens."""

    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = ['input_ids', 'attention_mask']
    UpperCamelCase__ = NllbTokenizer
    # prefix_tokens / suffix_tokens, filled in by set_*_lang_special_tokens.
    UpperCamelCase__ = []
    UpperCamelCase__ = []

    def __init__(
        self,
        snake_case_=None,
        snake_case_=None,
        snake_case_="<s>",
        snake_case_="</s>",
        snake_case_="</s>",
        snake_case_="<s>",
        snake_case_="<unk>",
        snake_case_="<pad>",
        snake_case_="<mask>",
        snake_case_=None,
        snake_case_=None,
        snake_case_=None,
        snake_case_=False,
        **snake_case_,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase = AddedToken(snake_case_, lstrip=snake_case_, rstrip=snake_case_) if isinstance(snake_case_, snake_case_) else mask_token
        lowercase = legacy_behaviour
        super().__init__(
            vocab_file=snake_case_,
            tokenizer_file=snake_case_,
            bos_token=snake_case_,
            eos_token=snake_case_,
            sep_token=snake_case_,
            cls_token=snake_case_,
            unk_token=snake_case_,
            pad_token=snake_case_,
            mask_token=snake_case_,
            src_lang=snake_case_,
            tgt_lang=snake_case_,
            additional_special_tokens=snake_case_,
            legacy_behaviour=snake_case_,
            **snake_case_,
        )

        lowercase = vocab_file
        lowercase = False if not self.vocab_file else True

        lowercase = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})
        lowercase = {
            lang_code: self.convert_tokens_to_ids(snake_case_) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        lowercase = src_lang if src_lang is not None else '''eng_Latn'''
        lowercase = self.convert_tokens_to_ids(self._src_lang)
        lowercase = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def _A(self):
        # Current source language code.
        return self._src_lang

    @src_lang.setter
    def _A(self, snake_case_):
        # Changing src_lang re-derives the language special tokens.
        lowercase = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _A(self, snake_case_, snake_case_=None):
        # build_inputs_with_special_tokens: wrap ids with prefix/suffix tokens.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def _A(self, snake_case_, snake_case_=None):
        # create_token_type_ids_from_sequences: NLLB uses all-zero type ids.
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def _A(self, snake_case_, snake_case_, snake_case_, snake_case_, **snake_case_):
        # _build_translation_inputs: tokenize with src_lang set and attach the
        # forced BOS id for the target language.
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')

        lowercase = src_lang
        lowercase = self(snake_case_, add_special_tokens=snake_case_, return_tensors=snake_case_, **snake_case_)
        lowercase = self.convert_tokens_to_ids(snake_case_)
        lowercase = tgt_lang_id
        return inputs

    def _A(
        self,
        snake_case_,
        snake_case_="eng_Latn",
        snake_case_=None,
        snake_case_="fra_Latn",
        **snake_case_,
    ):
        # prepare_seq2seq_batch with NLLB's default language pair.
        lowercase = src_lang
        lowercase = tgt_lang
        return super().prepare_seqaseq_batch(snake_case_, snake_case_, **snake_case_)

    def _A(self):
        # _switch_to_input_mode
        return self.set_src_lang_special_tokens(self.src_lang)

    def _A(self):
        # _switch_to_target_mode
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def _A(self, snake_case_):
        # set_src_lang_special_tokens: legacy places the code after </s>,
        # otherwise the code prefixes the sequence.
        lowercase = self.convert_tokens_to_ids(snake_case_)

        if self.legacy_behaviour:
            lowercase = []
            lowercase = [self.eos_token_id, self.cur_lang_code]
        else:
            lowercase = [self.cur_lang_code]
            lowercase = [self.eos_token_id]

        lowercase = self.convert_ids_to_tokens(self.prefix_tokens)
        lowercase = self.convert_ids_to_tokens(self.suffix_tokens)

        lowercase = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str,
            pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def _A(self, snake_case_):
        # set_tgt_lang_special_tokens: mirror of the src variant.
        lowercase = self.convert_tokens_to_ids(snake_case_)
        if self.legacy_behaviour:
            lowercase = []
            lowercase = [self.eos_token_id, self.cur_lang_code]
        else:
            lowercase = [self.cur_lang_code]
            lowercase = [self.eos_token_id]

        lowercase = self.convert_ids_to_tokens(self.prefix_tokens)
        lowercase = self.convert_ids_to_tokens(self.suffix_tokens)

        lowercase = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str,
            pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def _A(self, snake_case_, snake_case_=None):
        # save_vocabulary: copy the sentencepiece model next to the fast files.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )

        if not os.path.isdir(snake_case_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        lowercase = os.path.join(
            snake_case_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(snake_case_):
            copyfile(self.vocab_file, snake_case_)

        return (out_vocab_file,)
72
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : list )-> list: '''simple docstring''' UpperCAmelCase__ : List[str] = False while is_sorted is False: # Until all the indices are traversed keep looping UpperCAmelCase__ : List[Any] = True for i in range(0 , len(snake_case ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = input_list[i + 1], input_list[i] # swapping if elements not in order UpperCAmelCase__ : int = False for i in range(1 , len(snake_case ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = input_list[i + 1], input_list[i] # swapping if elements not in order UpperCAmelCase__ : str = False return input_list if __name__ == "__main__": print("""Enter list to be sorted""") _lowerCAmelCase : List[str] = [int(x) for x in input().split()] # inputing elements of the list in one line _lowerCAmelCase : Optional[int] = odd_even_sort(input_list) print("""The sorted list is""") print(sorted_list)
438
0
from __future__ import annotations from math import pi, sqrt def __lowercase ( snake_case, snake_case ): """simple docstring""" if inductance <= 0: raise ValueError('''Inductance cannot be 0 or negative''' ) elif capacitance <= 0: raise ValueError('''Capacitance cannot be 0 or negative''' ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
180
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase_ ( unittest.TestCase ): @property def A ( self ): """simple docstring""" torch.manual_seed(0 ) __magic_name__ :Dict = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model @property def A ( self ): """simple docstring""" torch.manual_seed(0 ) __magic_name__ :List[str] = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , ) return model @property def A ( self ): """simple docstring""" torch.manual_seed(0 ) __magic_name__ :int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.dummy_uncond_unet __magic_name__ :Optional[int] = DDIMScheduler() __magic_name__ :List[str] = self.dummy_vq_model __magic_name__ :Tuple = LDMPipeline(unet=__lowerCAmelCase , vqvae=__lowerCAmelCase , scheduler=__lowerCAmelCase ) ldm.to(__lowerCAmelCase ) ldm.set_progress_bar_config(disable=__lowerCAmelCase ) __magic_name__ :List[Any] = torch.manual_seed(0 ) __magic_name__ :List[str] = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''numpy''' ).images __magic_name__ :List[Any] = torch.manual_seed(0 ) __magic_name__ :Any = 
ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''numpy''' , return_dict=__lowerCAmelCase )[0] __magic_name__ :Any = image[0, -3:, -3:, -1] __magic_name__ :Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __magic_name__ :Union[str, Any] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] ) __magic_name__ :Any = 1E-2 if torch_device != '''mps''' else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class lowerCamelCase_ ( unittest.TestCase ): def A ( self ): """simple docstring""" __magic_name__ :Tuple = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' ) ldm.to(__lowerCAmelCase ) ldm.set_progress_bar_config(disable=__lowerCAmelCase ) __magic_name__ :Optional[Any] = torch.manual_seed(0 ) __magic_name__ :Optional[int] = ldm(generator=__lowerCAmelCase , num_inference_steps=5 , output_type='''numpy''' ).images __magic_name__ :Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) __magic_name__ :List[str] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] ) __magic_name__ :Tuple = 1E-2 if torch_device != '''mps''' else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
180
1
'''simple docstring'''
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A directed edge whose weight is restricted to 0 or 1 (0-1 BFS)."""

    destination_vertex: int
    weight: int


class __lowerCamelCase:
    """Adjacency-list digraph with 0/1 edge weights and 0-1 BFS shortest path."""

    def __init__(self, size):
        # One adjacency list per vertex.
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        """Iterate over the edges leaving ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        """Number of vertices in the graph."""
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        """Insert a directed edge ``from_vertex -> to_vertex``.

        :raises ValueError: if ``weight`` is not 0/1 or ``to_vertex`` is out of range
        """
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        """Shortest-path length via 0-1 BFS (deque Dijkstra variant, O(V + E)).

        Weight-0 edges are pushed to the front of the deque, weight-1 edges to
        the back, so vertices are popped in non-decreasing distance order.

        :raises ValueError: if ``finish_vertex`` is unreachable
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    # Already reached at least as cheaply; skip.
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
128
import itertools
import math


def _is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k±1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if _is_prime(num):
            yield num
        num += 1


def lowerCAmelCase__(lowercase: int = 1_0001) -> int:
    """Return the ``lowercase``-th prime number (1-indexed).

    Project Euler #7: with the default argument this is the 10001st prime.
    """
    return next(itertools.islice(_prime_generator(), lowercase - 1, lowercase))


if __name__ == "__main__":
    print(F'''{lowerCAmelCase__() = }''')
243
0
import doctest
from collections import deque

import numpy as np


class __lowercase:
    """Circular convolution of two discrete signals via the circulant-matrix method."""

    def __init__(self) -> None:
        # Example signals; the shorter one is zero-padded before convolving.
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def UpperCAmelCase(self) -> list[float]:
        """Return the circular convolution of the two stored signals.

        Builds the circulant matrix of ``second_signal`` (one rotation per
        row) and multiplies it with ``first_signal``.
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # Create a zero matrix of max_length x max_length.
        matrix = [[0] * max_length for i in range(max_length)]
        # Zero-pad the shorter signal so both have the same length.
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)  # row i is the signal rotated right by i
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # Multiply the circulant matrix with the first signal.
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # Round off to two decimal places for display parity with textbooks.
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
720
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
    """Sort ``lowercase`` in place with cocktail-shaker sort and return it.

    Alternates a right-to-left pass (sinks the minimum of the window) with a
    left-to-right pass (floats the maximum), shrinking the window each round;
    stops early when a full round performs no swap.

    :param lowercase: list of mutually comparable items
    :return: the same list object, sorted ascending
    """
    for i in range(len(lowercase) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # backward pass over the active window
            if lowercase[j] < lowercase[j - 1]:
                lowercase[j - 1], lowercase[j] = lowercase[j], lowercase[j - 1]
                swapped = True
        for j in range(i):  # forward pass over the active window
            if lowercase[j] > lowercase[j + 1]:
                lowercase[j], lowercase[j + 1] = lowercase[j + 1], lowercase[j]
                swapped = True
        if not swapped:
            # No swap in either direction: already sorted.
            break
    return lowercase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
    _items = [int(item) for item in lowerCamelCase.split(',')]
    print(f"""{SCREAMING_SNAKE_CASE__(_items) = }""")
684
0
from manim import * class _snake_case ( UpperCAmelCase_ ): def lowercase__ ( self): '''simple docstring''' lowercase__ : str = Rectangle(height=0.5 , width=0.5) lowercase__ : Tuple = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0) lowercase__ : List[Any] = [mem.copy() for i in range(6)] lowercase__ : Tuple = [mem.copy() for i in range(6)] lowercase__ : List[str] = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : Tuple = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : List[Any] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : Dict = Text("""CPU""" , font_size=24) lowercase__ : List[str] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_) cpu.move_to([-2.5, -0.5, 0]) self.add(SCREAMING_SNAKE_CASE_) lowercase__ : str = [mem.copy() for i in range(1)] lowercase__ : Any = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : Optional[int] = Text("""GPU""" , font_size=24) lowercase__ : Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_) gpu.align_to(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) gpu.set_x(gpu.get_x() - 1) self.add(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = [mem.copy() for i in range(6)] lowercase__ : str = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : Optional[Any] = Text("""Model""" , font_size=24) lowercase__ : Tuple = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_) model.move_to([3, -1.0, 0]) self.play( Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , ) lowercase__ : Optional[Any] = 
MarkupText( f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , ) lowercase__ : Any = Square(side_length=2.2) key.move_to([-5, 2, 0]) lowercase__ : Optional[int] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0]) step_a.move_to([2, 2, 0]) self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=2.5) , Write(SCREAMING_SNAKE_CASE_) , Write(SCREAMING_SNAKE_CASE_)) self.add(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = [] lowercase__ : Union[str, Any] = [] lowercase__ : str = [] for i, rect in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7) cpu_target.move_to(SCREAMING_SNAKE_CASE_) cpu_target.generate_target() lowercase__ : Union[str, Any] = 0.4_6 / 4 lowercase__ : Tuple = 0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=SCREAMING_SNAKE_CASE_) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0) cpu_targs.append(SCREAMING_SNAKE_CASE_) first_animations.append(rect.animate(run_time=0.5).set_stroke(SCREAMING_SNAKE_CASE_)) second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5)) self.play(*SCREAMING_SNAKE_CASE_) self.play(*SCREAMING_SNAKE_CASE_) self.wait()
12
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class __UpperCAmelCase(PipelineTool):
    """Tool that answers an English question about an image (ViLT VQA checkpoint)."""

    # NOTE(review): the obfuscated chunk bound every attribute and method to a
    # single colliding name and inherited from an undefined ``_lowerCamelCase``;
    # names are restored to the PipelineTool contract so the class is usable.
    default_checkpoint = """dandelin/vilt-b32-finetuned-vqa"""
    description = (
        """This is a tool that answers a question about an image. It takes an input named `image` which should be the """
        """image containing the information, as well as a `question` which should be the question in English. It """
        """returns a text that is the answer to the question."""
    )
    name = """image_qa"""
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]

    def __init__(self, *args, **kwargs):
        """Require the vision backend before constructing the tool."""
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image, question):
        """Turn the (image, question) pair into PyTorch model inputs."""
        return self.pre_processor(image, question, return_tensors='pt')

    def forward(self, inputs):
        """Run the VQA model without tracking gradients and return the logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit index to its answer label string."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
495
0
import socket


def _UpperCAmelCase():
    """Fetch a file from a TCP server on this host, port 12312.

    Connects, sends a greeting, then streams the response into a local file
    named ``Received_file`` until the server closes the connection.
    Side effects only: network I/O, file write, console prints.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()  # assumes the server runs on this machine
    port = 12_312

    sock.connect((host, port))
    sock.send(b"""Hello server!""")

    with open("""Received_file""", """wb""") as out_file:
        print("""File opened""")
        print("""Receiving data...""")
        while True:
            data = sock.recv(1_024)
            if not data:
                # Empty read means the server closed its end.
                break
            out_file.write(data)

    print("""Successfully received the file""")
    sock.close()
    print("""Connection closed""")


if __name__ == "__main__":
    _UpperCAmelCase()
458
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Union[str, Any] = { 'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'], 'processing_layoutlmv2': ['LayoutLMv2Processor'], 'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[str] = ['LayoutLMv2TokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[str] = ['LayoutLMv2FeatureExtractor'] __UpperCamelCase : Union[str, Any] = ['LayoutLMv2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = [ 'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv2ForQuestionAnswering', 'LayoutLMv2ForSequenceClassification', 'LayoutLMv2ForTokenClassification', 'LayoutLMv2Layer', 'LayoutLMv2Model', 'LayoutLMv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
458
1
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowerCamelCase__ ( __lowercase="" ): snake_case : Union[str, Any] = tempfile.mkdtemp() return os.path.join(__lowercase , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class _a (unittest.TestCase ): '''simple docstring''' def snake_case_ ( self ) -> int: snake_case : Optional[Any] = torch.rand(12 ,dtype=torch.floataa ) - 0.5 snake_case : Tuple = AgentAudio(__a ) snake_case : int = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(__a ,agent_type.to_raw() ,atol=1E-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(__a ) ) # Ensure that the file contains the same value as the original tensor snake_case , snake_case : str = sf.read(__a ) self.assertTrue(torch.allclose(__a ,torch.tensor(__a ) ,atol=1E-4 ) ) def snake_case_ ( self ) -> Dict: snake_case : Dict = torch.rand(12 ,dtype=torch.floataa ) - 0.5 snake_case : int = get_new_path(suffix=""".wav""" ) sf.write(__a ,__a ,16_000 ) snake_case : Any = AgentAudio(__a ) self.assertTrue(torch.allclose(__a ,agent_type.to_raw() ,atol=1E-4 ) ) self.assertEqual(agent_type.to_string() ,__a ) @require_vision @require_torch class _a (unittest.TestCase ): '''simple docstring''' def snake_case_ ( self ) -> Dict: snake_case : List[Any] = torch.randint(0 ,256 ,(64, 64, 3) ) snake_case : List[Any] = AgentImage(__a ) snake_case : List[str] = str(agent_type.to_string() ) # Ensure 
that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(__a ,agent_type._tensor ,atol=1E-4 ) ) self.assertIsInstance(agent_type.to_raw() ,Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__a ) ) def snake_case_ ( self ) -> Tuple: snake_case : Any = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" snake_case : Union[str, Any] = Image.open(__a ) snake_case : Any = AgentImage(__a ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__a ) ) def snake_case_ ( self ) -> List[Any]: snake_case : Optional[int] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" snake_case : Union[str, Any] = Image.open(__a ) snake_case : List[Any] = AgentImage(__a ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__a ) ) class _a (unittest.TestCase ): '''simple docstring''' def snake_case_ ( self ) -> Any: snake_case : str = """Hey!""" snake_case : Union[str, Any] = AgentText(__a ) self.assertEqual(__a ,agent_type.to_string() ) self.assertEqual(__a ,agent_type.to_raw() ) self.assertEqual(__a ,__a )
116
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter


# Configure logging at import time so the counting run is traceable.
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
    )
    parser.add_argument(
        """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
    )
    parser.add_argument(
        """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
    )
    parser.add_argument("""--vocab_size""", default=3_0522, type=int)
    args = parser.parse_args()

    logger.info(f"""Loading data from {args.data_file}""")
    # NOTE(review): pickle.load on an untrusted file can execute arbitrary
    # code — only run this on dumps you produced yourself.
    with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)

    logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense per-token-id count vector; ids absent from the data stay 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, """wb""") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
116
1
"""simple docstring""" import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json', } class __lowercase ( __lowerCamelCase ): snake_case_ = """align_text_model""" def __init__( self : List[Any] ,A : List[str]=30_522 ,A : List[str]=768 ,A : Optional[Any]=12 ,A : int=12 ,A : str=3_072 ,A : Any="gelu" ,A : Optional[Any]=0.1 ,A : str=0.1 ,A : Union[str, Any]=512 ,A : int=2 ,A : Any=0.0_2 ,A : Any=1e-12 ,A : str=0 ,A : Union[str, Any]="absolute" ,A : List[str]=True ,**A : Any ,): '''simple docstring''' super().__init__(**A ) UpperCAmelCase__ : Tuple = vocab_size UpperCAmelCase__ : Tuple = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : List[Any] = num_attention_heads UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : Optional[Any] = intermediate_size UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : int = attention_probs_dropout_prob UpperCAmelCase__ : List[Any] = max_position_embeddings UpperCAmelCase__ : Optional[Any] = type_vocab_size UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : List[str] = position_embedding_type UpperCAmelCase__ : Dict = use_cache UpperCAmelCase__ : Union[str, Any] = pad_token_id @classmethod def __lowercase ( cls : int ,A : Union[str, os.PathLike] ,**A : Optional[Any] ): '''simple docstring''' cls._set_token_in_kwargs(A ) UpperCAmelCase__ : str = cls.get_config_dict(A ,**A ) # get the text config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": UpperCAmelCase__ : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: 
logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(A ,**A ) class __lowercase ( __lowerCamelCase ): snake_case_ = """align_vision_model""" def __init__( self : Tuple ,A : int = 3 ,A : int = 600 ,A : float = 2.0 ,A : float = 3.1 ,A : int = 8 ,A : List[int] = [3, 3, 5, 3, 5, 5, 3] ,A : List[int] = [32, 16, 24, 40, 80, 112, 192] ,A : List[int] = [16, 24, 40, 80, 112, 192, 320] ,A : List[int] = [] ,A : List[int] = [1, 2, 2, 2, 1, 2, 1] ,A : List[int] = [1, 2, 2, 3, 3, 4, 1] ,A : List[int] = [1, 6, 6, 6, 6, 6, 6] ,A : float = 0.2_5 ,A : str = "swish" ,A : int = 2_560 ,A : str = "mean" ,A : float = 0.0_2 ,A : float = 0.0_0_1 ,A : float = 0.9_9 ,A : float = 0.2 ,**A : Optional[Any] ,): '''simple docstring''' super().__init__(**A ) UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Dict = width_coefficient UpperCAmelCase__ : str = depth_coefficient UpperCAmelCase__ : List[Any] = depth_divisor UpperCAmelCase__ : Tuple = kernel_sizes UpperCAmelCase__ : Dict = in_channels UpperCAmelCase__ : int = out_channels UpperCAmelCase__ : List[str] = depthwise_padding UpperCAmelCase__ : List[str] = strides UpperCAmelCase__ : List[str] = num_block_repeats UpperCAmelCase__ : int = expand_ratios UpperCAmelCase__ : List[str] = squeeze_expansion_ratio UpperCAmelCase__ : Tuple = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dim UpperCAmelCase__ : Union[str, Any] = pooling_type UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = batch_norm_eps UpperCAmelCase__ : Optional[int] = batch_norm_momentum UpperCAmelCase__ : Tuple = drop_connect_rate UpperCAmelCase__ : Tuple = sum(A ) * 4 @classmethod def __lowercase ( cls : Union[str, Any] ,A : Union[str, os.PathLike] ,**A : str ): '''simple docstring''' cls._set_token_in_kwargs(A ) 
UpperCAmelCase__ : List[Any] = cls.get_config_dict(A ,**A ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": UpperCAmelCase__ : Dict = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(A ,**A ) class __lowercase ( __lowerCamelCase ): snake_case_ = """align""" snake_case_ = True def __init__( self : Optional[Any] ,A : List[Any]=None ,A : List[Any]=None ,A : Any=640 ,A : Tuple=1.0 ,A : List[str]=0.0_2 ,**A : int ,): '''simple docstring''' super().__init__(**A ) if text_config is None: UpperCAmelCase__ : int = {} logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" ) if vision_config is None: UpperCAmelCase__ : Any = {} logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" ) UpperCAmelCase__ : int = AlignTextConfig(**A ) UpperCAmelCase__ : Optional[Any] = AlignVisionConfig(**A ) UpperCAmelCase__ : Tuple = projection_dim UpperCAmelCase__ : str = temperature_init_value UpperCAmelCase__ : Dict = initializer_range @classmethod def __lowercase ( cls : Tuple ,A : AlignTextConfig ,A : AlignVisionConfig ,**A : Tuple ): '''simple docstring''' return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**A ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : List[str] = self.text_config.to_dict() UpperCAmelCase__ : Dict = self.vision_config.to_dict() UpperCAmelCase__ : List[Any] = self.__class__.model_type return output
717
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = TypeVar('DatasetType', Dataset, IterableDataset) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("""Unable to interleave an empty list of datasets.""" ) for i, dataset in enumerate(__UpperCamelCase ): if not isinstance(__UpperCamelCase , (Dataset, IterableDataset) ): if isinstance(__UpperCamelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " """is an empty dataset dictionary.""" ) raise ValueError( F"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." 
) if i == 0: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = ( (Dataset, IterableDataset) if isinstance(__UpperCamelCase , __UpperCamelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__UpperCamelCase , __UpperCamelCase ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , stopping_strategy=__UpperCamelCase ) else: return _interleave_iterable_datasets( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , stopping_strategy=__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , ): '''simple docstring''' if not dsets: raise ValueError("""Unable to concatenate an empty list of datasets.""" ) for i, dataset in enumerate(__UpperCamelCase ): if not isinstance(__UpperCamelCase , (Dataset, IterableDataset) ): if isinstance(__UpperCamelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " """is an empty dataset dictionary.""" ) raise ValueError( F"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." 
) if i == 0: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = ( (Dataset, IterableDataset) if isinstance(__UpperCamelCase , __UpperCamelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__UpperCamelCase , __UpperCamelCase ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , axis=__UpperCamelCase ) else: return _concatenate_iterable_datasets(__UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , axis=__UpperCamelCase )
194
0
"""Convert a fairseq UniSpeechSat checkpoint into the Hugging Face Transformers format.

Usage::

    python convert_unispeech_sat.py --checkpoint_path ckpt.pt --pytorch_dump_folder_path out/
"""

import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name fragment -> Transformers parameter name ("*" is a layer-index wildcard).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}

# HF keys that live at the top level of the model (i.e. NOT under the "unispeech_sat." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign ``value`` to the parameter of ``hf_pointer`` addressed by the dotted ``key``.

    ``weight_type`` selects which tensor of the resolved module to overwrite
    ("weight", "weight_g", "weight_v", "bias" or ``None`` for the object itself).
    Raises ``ValueError`` when the checkpoint tensor shape does not match the target.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor of ``fairseq_model``'s state dict into ``hf_model``.

    Conv feature-extractor tensors are routed through :func:`load_conv_layer`;
    everything else goes through the ``MAPPING`` table. Unmatched tensors are
    collected and logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # the token before the matched fragment is the layer index
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one conv / layer-norm tensor of the feature extractor.

    The layer and tensor kind are parsed from ``full_name``
    (``...conv_layers.<layer_id>.<type_id>...``); mismatched shapes raise ``ValueError``
    and unrecognized tensors are appended to ``unused_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq UniSpeechSat checkpoint into the Transformers design and save it."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): the original script overrides dict_path with "" unconditionally,
    # so the fairseq "data" directory resolves to "". Preserved as-is — confirm intended.
    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
260
"""Tests for the PyTorch TimeSformer model."""

import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    """Builds a tiny TimeSformer config plus random inputs for fast unit tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random pixel values."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Here we also overwrite some of the tests of test_modeling_common.py, as TimeSformer does not use
    input_ids, inputs_embeds, attention_mask and seq_length."""

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Optionally add dummy labels for classification models."""
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    """Download and return the spaghetti-eating test video as a list of frames."""
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
260
1
"""Convert an original (CompVis) latent-diffusion UNet checkpoint into the diffusers format."""

import argparse
import json

import torch


def shave_segments(path, n_shave_prefix_segments=1):
    """Remove segments from a dotted parameter ``path``.

    A positive ``n_shave_prefix_segments`` drops that many leading segments;
    a negative value drops trailing segments instead.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map resnet parameter names (LDM -> diffusers) and return old/new pairs."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map attention parameter names (LDM -> diffusers) and return old/new pairs."""
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """Assign tensors from ``old_checkpoint`` to ``checkpoint`` following ``paths``.

    ``attention_paths_to_split`` maps fused qkv keys to per-projection target keys;
    those tensors are split into query/key/value before assignment. Assumes ``paths``
    is a list of dicts with ``old``/``new`` keys.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Convert a full LDM UNet ``checkpoint`` (state dict) into diffusers naming."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    # diffusers is only needed when actually running the conversion; deferring
    # the import keeps the pure path-mapping helpers importable without it.
    from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel

    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
28
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase: '''simple docstring''' def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ): '''simple docstring''' _snake_case : int = parent _snake_case : int = batch_size _snake_case : List[Any] = image_size _snake_case : List[str] = num_channels _snake_case : Tuple = num_stages _snake_case : Union[str, Any] = hidden_sizes _snake_case : List[Any] = depths _snake_case : Tuple = is_training _snake_case : List[str] = use_labels _snake_case : Tuple = intermediate_size _snake_case : List[str] = hidden_act _snake_case : Optional[Any] = num_labels _snake_case : Tuple = initializer_range _snake_case : Tuple = out_features _snake_case : Tuple = out_indices _snake_case : Dict = scope def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' 
        # --- tail of ConvNextVaModelTester.prepare_config_and_inputs (its `def` line is above this chunk) ---
        # NOTE(review): this file was machine-renamed. Every assignment below binds the
        # same name `_snake_case`, yet later statements read the original names
        # (`config`, `pixel_values`, `labels`, `model`, `result`, ...), and many call
        # sites pass an undefined `a_`. Code is left byte-identical; comments only.
        _snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _snake_case : Any = None
        if self.use_labels:
            # labels drawn uniformly from [0, num_labels)
            _snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
        _snake_case : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def UpperCamelCase_ ( self: Tuple ):
        '''Build the small ConvNextV2 config shared by all checks in this tester.'''
        # NOTE(review): `is_decoder=a_` references an undefined name (mangled argument).
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=a_,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
        '''Run the bare model and check the last-hidden-state shape.'''
        # NOTE(review): duplicate parameter names `a_` are a SyntaxError as written.
        _snake_case : int = ConvNextVaModel(config=a_ )
        model.to(a_ )
        model.eval()
        _snake_case : Any = model(a_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
        '''Run the classification head and check the logits shape.'''
        _snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
        model.to(a_ )
        model.eval()
        _snake_case : Optional[int] = model(a_, labels=a_ )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
        '''Check the backbone's feature maps / channels, with and without out_features.'''
        _snake_case : List[str] = ConvNextVaBackbone(config=a_ )
        model.to(a_ )
        model.eval()
        _snake_case : int = model(a_ )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        _snake_case : Tuple = None
        _snake_case : Tuple = ConvNextVaBackbone(config=a_ )
        model.to(a_ )
        model.eval()
        _snake_case : List[Any] = model(a_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ), 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ), 1 )
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )

    def UpperCamelCase_ ( self: Dict ):
        '''Prepare (config, inputs_dict) without labels.'''
        _snake_case : Dict = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : Any = config_and_inputs
        _snake_case : str = {"""pixel_values""": pixel_values}
        return config, inputs_dict

    def UpperCamelCase_ ( self: Optional[int] ):
        '''Prepare (config, inputs_dict) including labels.'''
        _snake_case : List[Any] = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs
        _snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict


@require_torch
class lowercase( __a , __a , unittest.TestCase ):
    '''ConvNextV2 model test suite.

    NOTE(review): base classes `__a` are undefined (mangled), all methods share the
    name `UpperCamelCase_` so later defs shadow earlier ones, and the class
    attributes are all bound to `lowercase__`.
    '''
    lowercase__ = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    lowercase__ = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False

    def UpperCamelCase_ ( self: Tuple ):
        '''setUp: create the model tester and a ConfigTester.'''
        _snake_case : Tuple = ConvNextVaModelTester(self )
        _snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )

    def UpperCamelCase_ ( self: List[str] ):
        '''Exercise the common config round-trip checks.'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def UpperCamelCase_ ( self: Tuple ):
        '''simple docstring'''
        return

    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def UpperCamelCase_ ( self: int ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def UpperCamelCase_ ( self: Any ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def UpperCamelCase_ ( self: str ):
        '''simple docstring'''
        pass

    def UpperCamelCase_ ( self: int ):
        '''Training smoke test: forward with labels and backprop the loss.'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
            _snake_case : List[Any] = True
            if model_class.__name__ in [
                *get_values(a_ ),
                *get_values(a_ ),
            ]:
                continue
            _snake_case : Tuple = model_class(a_ )
            model.to(a_ )
            model.train()
            _snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
            _snake_case : Any = model(**a_ ).loss
            loss.backward()

    def UpperCamelCase_ ( self: Optional[int] ):
        '''Same as above but with gradient checkpointing enabled.'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
            _snake_case : Any = False
            _snake_case : List[Any] = True
            if (
                model_class.__name__ in [*get_values(a_ ), *get_values(a_ )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            _snake_case : Dict = model_class(a_ )
            model.to(a_ )
            model.gradient_checkpointing_enable()
            model.train()
            _snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
            _snake_case : Optional[int] = model(**a_ ).loss
            loss.backward()

    def UpperCamelCase_ ( self: str ):
        '''Forward-signature test: first argument must be pixel_values.'''
        _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case : List[str] = model_class(a_ )
            _snake_case : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _snake_case : int = [*signature.parameters.keys()]
            _snake_case : Union[str, Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], a_ )

    def UpperCamelCase_ ( self: int ):
        '''simple docstring'''
        _snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )

    def UpperCamelCase_ ( self: Union[str, Any] ):
        '''Hidden-states test: stage count and first feature-map spatial size.'''
        def check_hidden_states_output(a_: str, a_: Tuple, a_: Tuple ):
            _snake_case : Optional[Any] = model_class(a_ )
            model.to(a_ )
            model.eval()
            with torch.no_grad():
                _snake_case : Any = model(**self._prepare_for_class(a_, a_ ) )
            _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _snake_case : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(a_ ), expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case : Optional[Any] = True
            check_hidden_states_output(a_, a_, a_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _snake_case : List[str] = True
            check_hidden_states_output(a_, a_, a_ )

    def UpperCamelCase_ ( self: Dict ):
        '''simple docstring'''
        _snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a_ )

    @slow
    def UpperCamelCase_ ( self: Dict ):
        '''Load the first published checkpoint and verify it instantiates.'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : str = ConvNextVaModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )


def UpperCAmelCase__ ():
    """Load the standard COCO cats test image fixture."""
    _snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_torch
@require_vision
class lowercase( unittest.TestCase ):
    '''Integration test against the facebook/convnextv2-tiny-1k-224 checkpoint.'''

    @cached_property
    def UpperCamelCase_ ( self: Optional[Any] ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None

    @slow
    def UpperCamelCase_ ( self: int ):
        '''Forward the fixture image and compare the first three logits.'''
        _snake_case : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a_ )
        _snake_case : Union[str, Any] = self.default_image_processor
        _snake_case : List[Any] = prepare_img()
        _snake_case : Optional[int] = preprocessor(images=a_, return_tensors="""pt""" ).to(a_ )
        # forward pass
        with torch.no_grad():
            _snake_case : Optional[int] = model(**a_ )
        # verify the logits
        _snake_case : Optional[int] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, a_ )
        _snake_case : Optional[int] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(a_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1E-4 ) )
28
1
def __lowerCAmelCase(word: str, max_width: int) -> list:
    """Fully justify *word* (a sentence) into lines exactly *max_width* wide.

    Greedy line filling: words are packed onto a line while they (plus one
    separating space each) fit, the line is then space-justified, and the final
    line is left-justified and padded with trailing spaces.

    Fixes over the previous revision: the signature declared two parameters
    with the same name (a SyntaxError) and every local was rebound to the same
    mangled name while later statements read the intended names.

    Args:
        word: the full text to justify (split on whitespace).
        max_width: exact width of every returned line.

    Returns:
        list of justified lines, each of length ``max_width``.

    >>> __lowerCAmelCase("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        # Total spaces that must be distributed between the words of this line.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if words_count == 1:
            # A single word is left-justified; all padding goes to the right.
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # Even share of spaces after each word except the last ...
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        # ... with the remainder handed out round-robin to the leftmost gaps.
        spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for current_word in words:
        # width = sum of word lengths so far; len(line) = minimum separating spaces.
        if width + len(current_word) + len(line) <= max_width:
            line.append(current_word)
            width += len(current_word)
        else:
            # Current line is full: justify it, then start a new line.
            answer.append(justify(line, width, max_width))
            line, width = [current_word], len(current_word)
    # Last line: single spaces between words, trailing padding to max_width.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


# Readable public alias for the mangled name above (backward-compatible addition).
text_justification = __lowerCAmelCase


if __name__ == "__main__":
    from doctest import testmod

    testmod()
678
# Tests for diffusers' DPMSolverSDEScheduler.
# NOTE(review): this chunk was machine-renamed. The base class `_lowercase` is
# undefined (the import of SchedulerCommonTest suggests it was intended), every
# method is named `__magic_name__` (later defs shadow earlier ones), both class
# attributes are bound to `A__`, and locals are rebound to `lowerCAmelCase__`
# while later statements read the intended names (`config`, `scheduler_class`,
# `scheduler`, `sample`, `model`, `output`, `result_sum`, `result_mean`, ...).
# Code is left byte-identical; only comments added.
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class _lowerCAmelCase ( _lowercase ):
    # scheduler class(es) under test, and number of inference steps per run
    A__ = (DPMSolverSDEScheduler,)
    A__ = 10

    def __magic_name__( self , **__UpperCAmelCase ):
        '''Default scheduler config; keyword overrides are merged in.'''
        lowerCAmelCase__ : Dict = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**__UpperCAmelCase )
        return config

    def __magic_name__( self ):
        # sweep over training timestep counts
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__UpperCAmelCase )

    def __magic_name__( self ):
        # sweep over (beta_start, beta_end) pairs
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )

    def __magic_name__( self ):
        # sweep over beta schedules
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__UpperCAmelCase )

    def __magic_name__( self ):
        # sweep over prediction types
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__UpperCAmelCase )

    def __magic_name__( self ):
        '''Full denoising loop with the default config; compare abs-sum/mean per device.'''
        lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : str = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
        lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[str] = output.prev_sample
        lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
        # reference values differ per backend
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3

    def __magic_name__( self ):
        '''Same loop with prediction_type="v_prediction".'''
        lowerCAmelCase__ : Dict = self.scheduler_classes[0]
        lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
        lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase__ : Optional[Any] = self.dummy_model()
        lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : int = output.prev_sample
        lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3

    def __magic_name__( self ):
        '''Full loop with the timesteps placed on the target device.'''
        lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
        lowerCAmelCase__ : Tuple = self.get_scheduler_config()
        lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
        lowerCAmelCase__ : Dict = self.dummy_model()
        lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Optional[int] = output.prev_sample
        lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3

    def __magic_name__( self ):
        '''Full loop with Karras sigmas enabled.'''
        lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : Dict = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
        lowerCAmelCase__ : List[Any] = self.dummy_model()
        lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
        lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase )
        for t in scheduler.timesteps:
            lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Dict = output.prev_sample
        lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
678
1
'''Barthez (French BART) SentencePiece tokenizer.

NOTE(review): this chunk was machine-renamed. The base class `__snake_case` is
undefined (should be PreTrainedTokenizer per the import), method parameters are
all named `snake_case__` (duplicates are a SyntaxError as written), and method
bodies read `__UpperCamelCase` and the original local names (`mask_token`,
`vocab_file`, `state`, `d`, ...) that the mangled assignments no longer bind.
Code is left byte-identical; only comments/docstrings added.
'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


_lowerCAmelCase : List[Any] = logging.get_logger(__name__)

# vocab file name expected inside a saved tokenizer directory
_lowerCAmelCase : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}

# download locations of the published SentencePiece models
_lowerCAmelCase : Dict = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
}

# max positional embeddings per checkpoint
_lowerCAmelCase : Dict = {
    'moussaKam/mbarthez': 1024,
    'moussaKam/barthez': 1024,
    'moussaKam/barthez-orangesum-title': 1024,
}

# SentencePiece word-boundary marker
_lowerCAmelCase : Optional[Any] = '▁'


class lowerCAmelCase ( __snake_case ):
    _lowerCamelCase : Any = VOCAB_FILES_NAMES
    _lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase : Any = ["""input_ids""", """attention_mask"""]

    def __init__( self , snake_case__ , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__ = None , **snake_case__ , ):
        '''Load the SentencePiece model and build fairseq-style special-token maps.'''
        # the mask token behaves like a normal word, i.e. includes the space before it
        lowerCAmelCase : Dict = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
        lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
        lowerCAmelCase : List[Any] = vocab_file
        lowerCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__UpperCamelCase ) )
        # fairseq reserves the first four ids for special tokens
        lowerCAmelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        lowerCAmelCase : Dict = len(self.sp_model ) - 1
        lowerCAmelCase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def lowercase ( self , snake_case__ , snake_case__ = None ):
        '''Build inputs with special tokens: <s> A </s> or <s> A </s></s> B </s>.'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase : str = [self.cls_token_id]
        lowerCAmelCase : Tuple = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowercase ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
        '''Return a mask with 1 at special-token positions and 0 elsewhere.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__UpperCamelCase )) + [1]
        return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]

    def lowercase ( self , snake_case__ , snake_case__ = None ):
        '''Token-type ids are all zero for this model (no segment embeddings).'''
        lowerCAmelCase : Dict = [self.sep_token_id]
        lowerCAmelCase : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def lowercase ( self ):
        # vocab size equals the SentencePiece model size
        return len(self.sp_model )

    def lowercase ( self ):
        '''Return the full token -> id vocabulary including added tokens.'''
        lowerCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowercase ( self , snake_case__ ):
        # tokenize via SentencePiece
        return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )

    def lowercase ( self , snake_case__ ):
        '''Token -> id; fairseq special tokens first, then the SentencePiece id.'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCAmelCase : str = self.sp_model.PieceToId(__UpperCamelCase )
        # SentencePiece id 0 is treated as unknown here
        return spm_id if spm_id else self.unk_token_id

    def lowercase ( self , snake_case__ ):
        '''Id -> token; inverse of the conversion above.'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(__UpperCamelCase )

    def lowercase ( self , snake_case__ ):
        '''Join sub-tokens back into a string, decoding around special tokens.'''
        lowerCAmelCase : int = []
        lowerCAmelCase : Union[str, Any] = ''
        lowerCAmelCase : Union[str, Any] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__UpperCamelCase ) + token
                lowerCAmelCase : Union[str, Any] = True
                lowerCAmelCase : List[Any] = []
            else:
                current_sub_tokens.append(__UpperCamelCase )
                lowerCAmelCase : int = False
        out_string += self.sp_model.decode(__UpperCamelCase )
        return out_string.strip()

    def __getstate__( self ):
        # drop the unpicklable SentencePiece processor before pickling
        lowerCAmelCase : int = self.__dict__.copy()
        lowerCAmelCase : List[str] = None
        return state

    def __setstate__( self , snake_case__ ):
        # restore state and re-load the SentencePiece model from vocab_file
        lowerCAmelCase : int = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            lowerCAmelCase : List[Any] = {}
        lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase ( self , snake_case__ , snake_case__ = None ):
        '''Copy (or serialize) the SentencePiece model into save_directory.'''
        if not os.path.isdir(__UpperCamelCase ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        lowerCAmelCase : Any = os.path.join(
            __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCamelCase , 'wb' ) as fi:
                lowerCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(__UpperCamelCase )
        return (out_vocab_file,)
702
'''Tokenization tests for Pegasus and BigBird-Pegasus.

NOTE(review): this chunk was machine-renamed. The first base class `a` is
undefined (a TokenizerTesterMixin mixin was likely intended), every method is
named `lowercase` (later defs shadow earlier ones), class attributes are all
bound to `_lowerCamelCase`, and locals are rebound to `lowerCAmelCase` while
later statements read the intended names (`tokenizer`, `vocab_keys`,
`rust_tokenizer`, `py_tokenizer`, `batch`, `targets`, ...). Code is left
byte-identical; only comments/docstrings added.
'''
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# small SentencePiece fixture without a BOS token
_lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')


@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
    _lowerCamelCase : str = PegasusTokenizer
    _lowerCamelCase : Union[str, Any] = PegasusTokenizerFast
    _lowerCamelCase : Optional[Any] = True
    _lowerCamelCase : Optional[Any] = True

    def lowercase ( self ):
        '''Save a fixture-backed tokenizer into the temp dir for the mixin.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase : List[Any] = PegasusTokenizer(snake_case__ )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def lowercase ( self ):
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )

    def lowercase ( self , **snake_case__ ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )

    def lowercase ( self , snake_case__ ):
        # (input, expected) pair used by the common tests
        return ("This is a test", "This is a test")

    def lowercase ( self ):
        '''Token </s> should map to id 1 and back.'''
        lowerCAmelCase : Optional[int] = '</s>'
        lowerCAmelCase : int = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )

    def lowercase ( self ):
        '''Check vocab ordering and size of the fixture tokenizer.'''
        lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(snake_case__ ) , 1103 )

    def lowercase ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )

    def lowercase ( self ):
        '''Slow and fast tokenizers must produce identical ids.'''
        lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase : Optional[Any] = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        lowerCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
        lowerCAmelCase : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
        self.assertListEqual(snake_case__ , snake_case__ )

    def lowercase ( self ):
        '''Check expected ids for a string containing both mask tokens.'''
        lowerCAmelCase : Any = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        lowerCAmelCase : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        lowerCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        lowerCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
        self.assertListEqual(snake_case__ , snake_case__ )

    def lowercase ( self ):
        '''Sanity-check the large tokenizer's special ids and offsets.'''
        lowerCAmelCase : Optional[Any] = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        lowerCAmelCase : List[Any] = 'To ensure a smooth flow of bank resolutions.'
        lowerCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        lowerCAmelCase : Any = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
        self.assertListEqual(snake_case__ , snake_case__ )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def lowercase ( self ):
        '''Padding/truncation shapes for a 2-example batch with text targets.'''
        lowerCAmelCase : Union[str, Any] = ['This is going to be way too long.' * 150, 'short example']
        lowerCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
        lowerCAmelCase : Dict = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
        lowerCAmelCase : Dict = self._large_tokenizer(
            text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(snake_case__ ) == 2  # input_ids, attention_mask.

    @slow
    def lowercase ( self ):
        '''Integration fixture check against a pinned checkpoint revision.'''
        # fmt: off
        lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )


@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
    # BigBird-Pegasus variant: offset=0, no mask_token_sent, classic [MASK].
    _lowerCamelCase : Optional[Any] = PegasusTokenizer
    _lowerCamelCase : str = PegasusTokenizerFast
    _lowerCamelCase : Tuple = True
    _lowerCamelCase : int = True

    def lowercase ( self ):
        '''Save a fixture-backed BigBird-style tokenizer into the temp dir.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase : int = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def lowercase ( self ):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )

    def lowercase ( self , **snake_case__ ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )

    def lowercase ( self , snake_case__ ):
        return ("This is a test", "This is a test")

    def lowercase ( self ):
        '''Slow and fast tokenizers must produce identical ids.'''
        lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase : List[str] = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        lowerCAmelCase : Dict = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
        lowerCAmelCase : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
        self.assertListEqual(snake_case__ , snake_case__ )

    @require_torch
    def lowercase ( self ):
        '''Padding/truncation shapes for the 4096-length BigBird variant.'''
        lowerCAmelCase : Optional[int] = ['This is going to be way too long.' * 1000, 'short example']
        lowerCAmelCase : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
        lowerCAmelCase : List[str] = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
        lowerCAmelCase : List[str] = self._large_tokenizer(
            text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(snake_case__ ) == 2  # input_ids, attention_mask.

    def lowercase ( self ):
        '''Pin the ids produced for a reference sentence (TF parity check).'''
        lowerCAmelCase : List[str] = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        lowerCAmelCase : Tuple = self._large_tokenizer(snake_case__ ).input_ids
        self.assertListEqual(
            snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
646
0
'''Regression test: an optimizer wrapped by Accelerator.prepare must stay picklable.'''
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class A_(unittest.TestCase):
    """CPU-only check that ``Accelerator.prepare`` returns a picklable optimizer."""

    def a(self):
        """Prepare an SGD optimizer and round-trip it through pickle.

        Fix over the previous revision: the prepared object and the pickled
        object were both the undefined name ``lowerCAmelCase_`` (a NameError);
        the optimizer is now prepared and pickled.
        """
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # reset global accelerator state so later tests start clean
        AcceleratorState._reset_state()
138
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase : List[str] = { "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab", "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn", "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi", "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab", "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab", "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng", "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn", "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl", "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab", "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn", "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn", "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab", "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn", "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", 
"Dzongkha": "dzo_Tibt", "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn", "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn", "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr", "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva", "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn", "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan", "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor", "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn", "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn", "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn", "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", "Luo": "luo_Latn", "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva", "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl", "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng", "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": 
"mya_Mymr", "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn", "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn", "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya", "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn", "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn", "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn", "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn", "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn", "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn", "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl", "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai", "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng", "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn", "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn", "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn", "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant", "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn", } class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): 
lowercase__ = "facebook/nllb-200-distilled-600M" lowercase__ = ( "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should " "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, " "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in " "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`." ) lowercase__ = "translator" lowercase__ = AutoTokenizer lowercase__ = AutoModelForSeqaSeqLM lowercase__ = LANGUAGE_CODES lowercase__ = ["text", "text", "text"] lowercase__ = ["text"] def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple): """simple docstring""" if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''') if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''') lowercase_ = self.lang_to_code[src_lang] lowercase_ = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowerCAmelCase_ , return_tensors="""pt""" , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_) def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : int): """simple docstring""" return self.model.generate(**lowerCAmelCase_) def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str): """simple docstring""" return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase_)
567
0
"""simple docstring""" def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" return "".join(chr(ord(__lowerCamelCase ) - 32 ) if '''a''' <= char <= '''z''' else char for char in word ) if __name__ == "__main__": from doctest import testmod testmod()
625
"""simple docstring""" _lowercase : Optional[Any] = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": 
"sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
625
1
from __future__ import annotations def a_ ( __magic_name__ ) -> Any: """simple docstring""" snake_case : List[str] = 0.00 snake_case : List[Any] = 0 for resistor in resistors: if resistor <= 0: snake_case : Dict = F"Resistor at index {index} has a negative or zero value!" raise ValueError(_snake_case ) first_sum += 1 / float(_snake_case ) index += 1 return 1 / first_sum def a_ ( __magic_name__ ) -> Optional[int]: """simple docstring""" snake_case : Any = 0.00 snake_case : Dict = 0 for resistor in resistors: sum_r += resistor if resistor < 0: snake_case : Optional[Any] = F"Resistor at index {index} has a negative value!" raise ValueError(_snake_case ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
598
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset SCREAMING_SNAKE_CASE__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class snake_case (nn.Module ): def __init__( self ,UpperCAmelCase_ ) -> Dict: super().__init__() lowercase__ = torchvision.models.resnetaaa(pretrained=UpperCAmelCase_ ) lowercase__ = list(model.children() )[:-2] lowercase__ = nn.Sequential(*UpperCAmelCase_ ) lowercase__ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _a ( self ,UpperCAmelCase_ ) -> str: # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 lowercase__ = self.pool(self.model(UpperCAmelCase_ ) ) lowercase__ = torch.flatten(UpperCAmelCase_ ,start_dim=2 ) lowercase__ = out.transpose(1 ,2 ).contiguous() return out # BxNx2048 class snake_case (UpperCamelCase ): def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Dict: lowercase__ = [json.loads(UpperCAmelCase_ ) for l in open(UpperCAmelCase_ )] lowercase__ = os.path.dirname(UpperCAmelCase_ ) lowercase__ = tokenizer lowercase__ = labels lowercase__ = len(UpperCAmelCase_ ) lowercase__ = max_seq_length lowercase__ = transforms def __len__( self ) -> Optional[Any]: return len(self.data ) def __getitem__( self ,UpperCAmelCase_ ) -> Optional[int]: lowercase__ = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] ,add_special_tokens=UpperCAmelCase_ ) ) lowercase__ , lowercase__ , lowercase__ = sentence[0], sentence[1:-1], sentence[-1] lowercase__ = sentence[: self.max_seq_length] lowercase__ = torch.zeros(self.n_classes ) lowercase__ = 1 lowercase__ = Image.open(os.path.join(self.data_dir ,self.data[index]["img"] ) ).convert("RGB" ) lowercase__ = self.transforms(UpperCAmelCase_ ) return { "image_start_token": start_token, 
"image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _a ( self ) -> str: lowercase__ = Counter() for row in self.data: label_freqs.update(row["label"] ) return label_freqs def lowerCamelCase ( _snake_case : Optional[int] ): '''simple docstring''' lowercase__ = [len(row["sentence"] ) for row in batch] lowercase__ , lowercase__ = len(_snake_case ), max(_snake_case ) lowercase__ = torch.zeros(_snake_case ,_snake_case ,dtype=torch.long ) lowercase__ = torch.zeros(_snake_case ,_snake_case ,dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_snake_case ,_snake_case ) ): lowercase__ = input_row["sentence"] lowercase__ = 1 lowercase__ = torch.stack([row["image"] for row in batch] ) lowercase__ = torch.stack([row["label"] for row in batch] ) lowercase__ = torch.stack([row["image_start_token"] for row in batch] ) lowercase__ = torch.stack([row["image_end_token"] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ): '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ): '''simple docstring''' return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] ,std=[0.12_221_994, 0.12_145_835, 0.14_380_469] ,), ] )
267
0
def __UpperCamelCase ( snake_case = 5_0_0_0_0_0_0_0 ) -> int: '''simple docstring''' __A = set() __A = int((limit - 2_4) ** (1 / 2) ) __A = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , snake_case ) ) ) for primea in primes: __A = primea * primea for primea in primes: __A = primea * primea * primea if square + cube >= limit - 1_6: break for primea in primes: __A = primea * primea * primea * primea __A = square + cube + tetr if total >= limit: break ret.add(snake_case ) return len(snake_case ) if __name__ == "__main__": print(F"""{solution() = }""")
341
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _UpperCamelCase : str = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""") def __UpperCamelCase ( snake_case , snake_case , snake_case = 1_6_0_0_0 ) -> Tuple: '''simple docstring''' __A = int(round(sample_rate * max_length ) ) if len(snake_case ) <= sample_length: return wav __A = randint(0 , len(snake_case ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _lowerCAmelCase: """simple docstring""" lowerCamelCase__ = field(default=_a , metadata={'''help''': '''Name of a dataset from the datasets package'''}) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''}) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''A file containing the training audio paths and labels.'''}) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''A file containing the validation audio paths and labels.'''}) lowerCamelCase__ = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). 
Defaults to \'train\'''' } , ) lowerCamelCase__ = field( default='''validation''' , metadata={ '''help''': ( '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) lowerCamelCase__ = field( default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , ) lowerCamelCase__ = field( default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''}) lowerCamelCase__ = field( default=_a , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCamelCase__ = field( default=_a , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) lowerCamelCase__ = field( default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , ) @dataclass class _lowerCAmelCase: """simple docstring""" lowerCamelCase__ = field( default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''}) lowerCamelCase__ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''Name or path of preprocessor config.'''}) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''}) lowerCamelCase__ = 
field( default=_a , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''}) lowerCamelCase__ = field( default=_a , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''}) lowerCamelCase__ = field( default=_a , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def SCREAMING_SNAKE_CASE__ ( self )-> List[str]: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , UpperCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def __UpperCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __A , __A , __A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __A , __A , __A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry('''run_audio_classification''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __A = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. __A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. 
__A = DatasetDict() __A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) __A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. " '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' F"{', '.join(raw_datasets['train'].column_names )}." ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. " '''Make sure to set `--label_column_name` to the correct text column - one of ''' F"{', '.join(raw_datasets['train'].column_names )}." ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy __A = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
__A = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) __A = feature_extractor.model_input_names[0] def train_transforms(snake_case ): __A = [] for audio in batch[data_args.audio_column_name]: __A = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(snake_case ) __A = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate ) __A = {model_input_name: inputs.get(snake_case )} __A = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(snake_case ): __A = [audio['''array'''] for audio in batch[data_args.audio_column_name]] __A = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate ) __A = {model_input_name: inputs.get(snake_case )} __A = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. __A = raw_datasets['''train'''].features[data_args.label_column_name].names __A , __A = {}, {} for i, label in enumerate(snake_case ): __A = str(snake_case ) __A = label # Load the accuracy metric from the datasets package __A = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(snake_case ): __A = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=snake_case , references=eval_pred.label_ids ) __A = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case ) , labelaid=snake_case , idalabel=snake_case , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __A = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: __A = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(snake_case , output_all_columns=snake_case ) if training_args.do_eval: if data_args.max_eval_samples is not None: __A = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(snake_case , output_all_columns=snake_case ) # Initialize our trainer __A = Trainer( model=snake_case , args=snake_case , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=snake_case , tokenizer=snake_case , ) # Training if training_args.do_train: __A = None if training_args.resume_from_checkpoint is not None: __A = 
training_args.resume_from_checkpoint elif last_checkpoint is not None: __A = last_checkpoint __A = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __A = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __A = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
341
1
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowerCamelCase( __UpperCAmelCase ): lowercase_ : Union[str, Any] = 42 lowercase_ : Union[str, Any] = 42 def __init__( self, lowerCamelCase, lowerCamelCase) -> Dict: """simple docstring""" super().__init__() self.register_modules(unet=lowerCAmelCase_, scheduler=lowerCAmelCase_) @torch.no_grad() def __call__( self, lowerCamelCase = 1, lowerCamelCase = 50, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, **lowerCamelCase, ) -> str: """simple docstring""" _lowercase : str = self.unet.config.sample_size _lowercase : Optional[int] = (batch_size, 3, img_size, img_size) _lowercase : str = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) _lowercase : Optional[Any] = randn_tensor(lowerCAmelCase_, generator=lowerCAmelCase_, device=self.device) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(lowerCAmelCase_) for t in self.progress_bar(self.scheduler.timesteps): # here sigma_t == t_i from the paper _lowercase : List[Any] = self.scheduler.schedule[t] _lowercase : str = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat _lowercase , _lowercase : int = self.scheduler.add_noise_to_input(lowerCAmelCase_, lowerCAmelCase_, generator=lowerCAmelCase_) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. _lowercase : Dict = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev _lowercase : int = self.scheduler.step(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_) if sigma_prev != 0: # 6. 
Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. _lowercase : List[str] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample _lowercase : Union[str, Any] = self.scheduler.step_correct( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, step_output.prev_sample, step_output['derivative'], ) _lowercase : Any = step_output.prev_sample _lowercase : str = (sample / 2 + 0.5).clamp(0, 1) _lowercase : List[str] = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": _lowercase : Any = self.numpy_to_pil(lowerCAmelCase_) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase_)
89
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[Any] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json", } class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = "timesformer" def __init__( self : int , lowerCAmelCase_ : Dict=2_2_4 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Union[str, Any]=8 , lowerCAmelCase_ : Dict=7_6_8 , lowerCAmelCase_ : Dict=1_2 , lowerCAmelCase_ : Optional[int]=1_2 , lowerCAmelCase_ : str=3_0_7_2 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Any=0.02 , lowerCAmelCase_ : List[str]=1E-6 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[int]="divided_space_time" , lowerCAmelCase_ : Optional[Any]=0 , **lowerCAmelCase_ : Dict , ): """simple docstring""" super().__init__(**lowerCAmelCase_) lowercase_ = image_size lowercase_ = patch_size lowercase_ = num_channels lowercase_ = num_frames lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = qkv_bias lowercase_ = attention_type lowercase_ = drop_path_rate
567
0
"""Generate and print Pascal's triangle, with a naive and an optimized builder."""


def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle with `num_rows` rows, centered."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Left-pad so the triangle is centered.
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values; no trailing space after the last element.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row.

    Args:
        num_rows: number of rows to generate (>= 0).

    Returns:
        List of rows; row ``i`` has ``i + 1`` entries.

    Raises:
        TypeError: if ``num_rows`` is not an int.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row ``current_row_idx`` from the rows already stored in ``triangle``."""
    current_row = [-1] * (current_row_idx + 1)
    # First and last elements of every row are 1.
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set one interior element as the sum of the two elements above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle exploiting each row's left/right symmetry.

    Same contract as :func:`generate_pascal_triangle`, but only the first half
    of each row is summed; the second half is a mirrored copy.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so adjacent-pair sums cover the edges.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row (rows are symmetric).
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Time both triangle generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
276
"""Utilities for mapping Flax transformer parameters to pjit PartitionSpecs."""

import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P

# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in ``qs`` match a contiguous window of ``ks``."""
    # Compile regexes and force a complete match on each key segment.
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Wrap ``(pattern, replacement)`` pairs into a ``replace(key, val)`` callable.

    The first rule whose pattern matches the key wins; otherwise the original
    value is returned unchanged.
    """

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules for a GPT-2-style transformer, keyed by parameter path.

    ``"mp"`` marks the axis sharded across the model-parallel mesh dimension;
    ``None`` leaves the parameter replicated.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Assign a PartitionSpec to every parameter in ``in_dict``.

    Raises AssertionError if any parameter path is not covered by the rules.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Seed every flattened key with the sentinel so unmatched keys are detected.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
276
1
"""Fine-tune a model on a multiple-choice task (e.g. SWAG) with the Trainer API."""

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match their labels."""
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data the model is trained and evaluated on."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can
    # concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: pad to a multiple of 8 only under fp16 for tensor-core efficiency.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
131
"""Tests for the MegatronBERT model (tester, common test suite, integration test)."""

import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    """Builds small MegatronBERT configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids, masks and labels for one small batch, plus the config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Each input is duplicated once per choice along a new second dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # Pretraining heads need both MLM labels and a next-sentence label.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in a long tensor on the test device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
131
1
"""Tokenization classes for PhoBERT (BPE with a fastBPE-style vocab/merges format)."""

import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    """PhoBERT tokenizer: byte-pair encoding applied to whitespace-split words."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # Special tokens occupy the first four ids; the rest come from vocab_file.
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Add `<s> ... </s>` (single) or `<s> A </s></s> B </s>` (pair) special tokens."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )

        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """PhoBERT does not use token type ids: the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single word; results are memoized in self.cache."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # fastBPE marks the end of a word with "</w>".
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Drop the trailing "</w>" marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split on whitespace, then BPE-encode each word."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and undo the "@@ " sub-word continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing "<token> <count>"-per-line dictionary into self.encoder."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
711
"""Maclaurin-series approximations of sine and cosine."""

from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with `accuracy` terms of the Maclaurin series.

    Args:
        theta: angle in radians.
        accuracy: number of series terms (positive int).

    Raises:
        ValueError: if theta is not numeric or accuracy is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # Reduce theta into [-2*pi, 2*pi] so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with `accuracy` terms of the Maclaurin series.

    Same argument contract and range reduction as :func:`maclaurin_sin`.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    # Reduce theta into [-2*pi, 2*pi] so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
195
0
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __magic_name__ ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[Any] , _lowercase : List[str] , _lowercase : int ): """simple docstring""" return f"""gaussian_noise_s={seed}_shape={'_'.join([str(_lowercase ) for s in shape] )}.npy""" def lowerCAmelCase ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def lowerCAmelCase ( self : Tuple , _lowercase : int=0 , _lowercase : Tuple=(4, 4, 64, 64) , _lowercase : Optional[int]=False ): """simple docstring""" _UpperCamelCase: List[str] = jnp.bfloataa if fpaa else jnp.floataa _UpperCamelCase: Any = jnp.array(load_hf_numpy(self.get_file_format(_lowercase , _lowercase ) ) , dtype=_lowercase ) return image def lowerCAmelCase ( self : List[Any] , _lowercase : str=False , _lowercase : str="CompVis/stable-diffusion-v1-4" ): """simple docstring""" _UpperCamelCase: Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa _UpperCamelCase: str = '''bf16''' if fpaa else None _UpperCamelCase , _UpperCamelCase: Dict = FlaxUNetaDConditionModel.from_pretrained( _lowercase , subfolder='''unet''' , dtype=_lowercase , revision=_lowercase ) return model, params def lowerCAmelCase ( self : Union[str, Any] , _lowercase : List[str]=0 , _lowercase : Union[str, Any]=(4, 77, 768) , _lowercase : Union[str, Any]=False ): """simple docstring""" _UpperCamelCase: str = jnp.bfloataa if fpaa else jnp.floataa _UpperCamelCase: Tuple = jnp.array(load_hf_numpy(self.get_file_format(_lowercase , _lowercase ) ) , dtype=_lowercase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, 
-0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def lowerCAmelCase ( self : str , _lowercase : Any , _lowercase : List[Any] , _lowercase : Optional[int] ): """simple docstring""" _UpperCamelCase , _UpperCamelCase: Optional[Any] = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_lowercase ) _UpperCamelCase: Optional[Any] = self.get_latents(_lowercase , fpaa=_lowercase ) _UpperCamelCase: Any = self.get_encoder_hidden_states(_lowercase , fpaa=_lowercase ) _UpperCamelCase: Optional[Any] = model.apply( {'''params''': params} , _lowercase , jnp.array(_lowercase , dtype=jnp.intaa ) , encoder_hidden_states=_lowercase , ).sample assert sample.shape == latents.shape _UpperCamelCase: List[str] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _UpperCamelCase: Any = jnp.array(_lowercase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_lowercase , _lowercase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def lowerCAmelCase ( self : Union[str, Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Tuple ): """simple docstring""" _UpperCamelCase , _UpperCamelCase: Any = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_lowercase ) _UpperCamelCase: Dict = self.get_latents(_lowercase , shape=(4, 4, 96, 96) , fpaa=_lowercase ) _UpperCamelCase: int = 
self.get_encoder_hidden_states(_lowercase , shape=(4, 77, 1_024) , fpaa=_lowercase ) _UpperCamelCase: List[str] = model.apply( {'''params''': params} , _lowercase , jnp.array(_lowercase , dtype=jnp.intaa ) , encoder_hidden_states=_lowercase , ).sample assert sample.shape == latents.shape _UpperCamelCase: Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _UpperCamelCase: str = jnp.array(_lowercase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_lowercase , _lowercase , atol=1E-2 )
271
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy import map: the config is always importable; framework-specific model
# classes are added below only when the corresponding backend is installed.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is
    # replaced by a _LazyModule that resolves names on first access.
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Install the lazy proxy in place of this module so attribute access
    # triggers the deferred imports declared in _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
271
1
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """Pytest hook: register the shared transformers test CLI options
    (e.g. --make-reports) on the pytest argument parser."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Pytest hook: when --make-reports is passed, write the per-run report
    files at the end of the test session."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
670
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import map: config/processor/tokenizer are always importable; the
# torch model classes are added below only when torch is installed.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is
    # replaced by a _LazyModule that resolves names on first access.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module so attribute access
    # triggers the deferred imports declared in _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
670
1
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ) -> Dict: _lowerCamelCase = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 1_2_8, """min_length""": 1_2, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_4_2, """min_length""": 5_6, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 6_2, """min_length""": 1_1, """num_beams""": 6}, } } _lowerCamelCase = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 1_2_8, """task_specific_params.summarization.min_length""": 1_2, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 1_4_2, """task_specific_params.summarization_cnn.min_length""": 5_6, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 6_2, """task_specific_params.summarization_xsum.min_length""": 1_1, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(snake_case__ ) , snake_case__ ) def _snake_case ( self : Union[str, Any] ) -> Optional[Any]: _lowerCamelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(snake_case__ ) , x.transpose() ) ) _lowerCamelCase = np.random.randn(3 , 4 , 5 ) 
self.assertTrue(np.allclose(transpose(snake_case__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def _snake_case ( self : List[Any] ) -> List[str]: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = torch.tensor(snake_case__ ) self.assertTrue(np.allclose(transpose(snake_case__ ) , transpose(snake_case__ ).numpy() ) ) _lowerCamelCase = np.random.randn(3 , 4 , 5 ) _lowerCamelCase = torch.tensor(snake_case__ ) self.assertTrue(np.allclose(transpose(snake_case__ , axes=(1, 2, 0) ) , transpose(snake_case__ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def _snake_case ( self : str ) -> Optional[int]: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = tf.constant(snake_case__ ) self.assertTrue(np.allclose(transpose(snake_case__ ) , transpose(snake_case__ ).numpy() ) ) _lowerCamelCase = np.random.randn(3 , 4 , 5 ) _lowerCamelCase = tf.constant(snake_case__ ) self.assertTrue(np.allclose(transpose(snake_case__ , axes=(1, 2, 0) ) , transpose(snake_case__ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def _snake_case ( self : Dict ) -> Any: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = jnp.array(snake_case__ ) self.assertTrue(np.allclose(transpose(snake_case__ ) , np.asarray(transpose(snake_case__ ) ) ) ) _lowerCamelCase = np.random.randn(3 , 4 , 5 ) _lowerCamelCase = jnp.array(snake_case__ ) self.assertTrue(np.allclose(transpose(snake_case__ , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case__ , axes=(1, 2, 0) ) ) ) ) def _snake_case ( self : Optional[Any] ) -> Optional[int]: _lowerCamelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(snake_case__ , (4, 3) ) , np.reshape(snake_case__ , (4, 3) ) ) ) _lowerCamelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(snake_case__ , (1_2, 5) ) , np.reshape(snake_case__ , (1_2, 5) ) ) ) @require_torch def _snake_case ( self : str ) -> Tuple: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = torch.tensor(snake_case__ ) 
self.assertTrue(np.allclose(reshape(snake_case__ , (4, 3) ) , reshape(snake_case__ , (4, 3) ).numpy() ) ) _lowerCamelCase = np.random.randn(3 , 4 , 5 ) _lowerCamelCase = torch.tensor(snake_case__ ) self.assertTrue(np.allclose(reshape(snake_case__ , (1_2, 5) ) , reshape(snake_case__ , (1_2, 5) ).numpy() ) ) @require_tf def _snake_case ( self : Optional[Any] ) -> Optional[Any]: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = tf.constant(snake_case__ ) self.assertTrue(np.allclose(reshape(snake_case__ , (4, 3) ) , reshape(snake_case__ , (4, 3) ).numpy() ) ) _lowerCamelCase = np.random.randn(3 , 4 , 5 ) _lowerCamelCase = tf.constant(snake_case__ ) self.assertTrue(np.allclose(reshape(snake_case__ , (1_2, 5) ) , reshape(snake_case__ , (1_2, 5) ).numpy() ) ) @require_flax def _snake_case ( self : Union[str, Any] ) -> Dict: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = jnp.array(snake_case__ ) self.assertTrue(np.allclose(reshape(snake_case__ , (4, 3) ) , np.asarray(reshape(snake_case__ , (4, 3) ) ) ) ) _lowerCamelCase = np.random.randn(3 , 4 , 5 ) _lowerCamelCase = jnp.array(snake_case__ ) self.assertTrue(np.allclose(reshape(snake_case__ , (1_2, 5) ) , np.asarray(reshape(snake_case__ , (1_2, 5) ) ) ) ) def _snake_case ( self : List[Any] ) -> Union[str, Any]: _lowerCamelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(snake_case__ ) , np.squeeze(snake_case__ ) ) ) _lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(snake_case__ , axis=2 ) , np.squeeze(snake_case__ , axis=2 ) ) ) @require_torch def _snake_case ( self : str ) -> Optional[int]: _lowerCamelCase = np.random.randn(1 , 3 , 4 ) _lowerCamelCase = torch.tensor(snake_case__ ) self.assertTrue(np.allclose(squeeze(snake_case__ ) , squeeze(snake_case__ ).numpy() ) ) _lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 ) _lowerCamelCase = torch.tensor(snake_case__ ) self.assertTrue(np.allclose(squeeze(snake_case__ , axis=2 ) , squeeze(snake_case__ 
, axis=2 ).numpy() ) ) @require_tf def _snake_case ( self : int ) -> Optional[Any]: _lowerCamelCase = np.random.randn(1 , 3 , 4 ) _lowerCamelCase = tf.constant(snake_case__ ) self.assertTrue(np.allclose(squeeze(snake_case__ ) , squeeze(snake_case__ ).numpy() ) ) _lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 ) _lowerCamelCase = tf.constant(snake_case__ ) self.assertTrue(np.allclose(squeeze(snake_case__ , axis=2 ) , squeeze(snake_case__ , axis=2 ).numpy() ) ) @require_flax def _snake_case ( self : str ) -> int: _lowerCamelCase = np.random.randn(1 , 3 , 4 ) _lowerCamelCase = jnp.array(snake_case__ ) self.assertTrue(np.allclose(squeeze(snake_case__ ) , np.asarray(squeeze(snake_case__ ) ) ) ) _lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 ) _lowerCamelCase = jnp.array(snake_case__ ) self.assertTrue(np.allclose(squeeze(snake_case__ , axis=2 ) , np.asarray(squeeze(snake_case__ , axis=2 ) ) ) ) def _snake_case ( self : Tuple ) -> Union[str, Any]: _lowerCamelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(snake_case__ , axis=1 ) , np.expand_dims(snake_case__ , axis=1 ) ) ) @require_torch def _snake_case ( self : List[Any] ) -> Any: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = torch.tensor(snake_case__ ) self.assertTrue(np.allclose(expand_dims(snake_case__ , axis=1 ) , expand_dims(snake_case__ , axis=1 ).numpy() ) ) @require_tf def _snake_case ( self : int ) -> Any: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = tf.constant(snake_case__ ) self.assertTrue(np.allclose(expand_dims(snake_case__ , axis=1 ) , expand_dims(snake_case__ , axis=1 ).numpy() ) ) @require_flax def _snake_case ( self : Any ) -> Dict: _lowerCamelCase = np.random.randn(3 , 4 ) _lowerCamelCase = jnp.array(snake_case__ ) self.assertTrue(np.allclose(expand_dims(snake_case__ , axis=1 ) , np.asarray(expand_dims(snake_case__ , axis=1 ) ) ) )
544
def greatest_common_divisor(a: int, b: int) -> int:
    """Return the GCD of a and b via the recursive Euclidean algorithm.

    >>> greatest_common_divisor(24, 40)
    8
    """
    # Base case: gcd(0, b) == |b|; otherwise recurse on (b mod a, a).
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Return the GCD of x and y via the iterative Euclidean algorithm.

    >>> gcd_by_iterative(24, 40)
    8
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two comma-separated integers from stdin and print both GCDs."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
336
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Union[str, Any] = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Tuple: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : 
Optional[Any] = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> int: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> List[Any]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Optional[int] = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> int: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
47
"""Deduplicate identical initializer tensors in an ONNX model.

Equal initializers are detected pairwise; duplicates are removed from the
graph and every node input that referenced a removed tensor is rewired to
the surviving copy.  The optimized model is saved next to the input file
with an ``optimized_`` filename prefix.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Return True if two TensorProtos are equal ignoring their names.

    Temporarily blanks both names so the protobuf equality compares only
    the tensor payload/metadata, then restores them.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input of `node_proto` called `name` with `new_name`,
    recursing into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Replace in place while preserving input order.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply _node_replace_input_with to every node of a graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers (index pairs `(i, ref_i)` with i > ref_i)
    from `model_without_ext` and rewire references to the kept copy."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load an ONNX model, remove duplicate initializers, and save the result.

    Args:
        onnx_file_path: path to the ``.onnx`` file to optimize.

    Returns:
        Path of the saved optimized model (``optimized_<name>`` in the same
        directory).
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Estimate bytes saved from the element count and dtype width
                # (1=float32, 6=int32 -> 4 bytes; 7=int64, 11=double -> 8 bytes).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
47
1
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
611
def get_set_bits_count(number: int) -> int:
    """Return the number of set bits (1s) in the binary representation of a
    non-negative integer.

    Args:
        number: the value whose set bits are counted; must be an int >= 0.

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` is negative.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    # Validate the type first: comparing a non-int against 0 is either
    # meaningless (floats would pass) or raises a confusing error itself.
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
611
1
"""simple docstring""" from __future__ import annotations import math from collections.abc import Callable def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ): """simple docstring""" UpperCamelCase = x_start UpperCamelCase = fnc(_SCREAMING_SNAKE_CASE ) UpperCamelCase = 0.0 for _ in range(_SCREAMING_SNAKE_CASE ): # Approximates curve as a sequence of linear lines and sums their length UpperCamelCase = (x_end - x_start) / steps + xa UpperCamelCase = fnc(_SCREAMING_SNAKE_CASE ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step UpperCamelCase = xa UpperCamelCase = fxa return length if __name__ == "__main__": def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" return math.sin(10 * x ) print('''f(x) = sin(10 * x)''') print('''The length of the curve from x = -10 to x = 10 is:''') lowerCAmelCase__ = 10 while i <= 100_000: print(f'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
544
"""simple docstring""" from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class _lowerCamelCase : pass
544
1
from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase_ ): '''simple docstring''' __lowerCamelCase : str = ["torch", "transformers", "onnx"] def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(self, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase_ ): '''simple docstring''' __lowerCamelCase : Optional[Any] = ["torch", "transformers", "onnx"] def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(self, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase_ ): '''simple docstring''' __lowerCamelCase : Tuple = ["torch", "transformers", "onnx"] def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(self, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase_ ): '''simple docstring''' __lowerCamelCase : Tuple = ["torch", "transformers", "onnx"] def __init__( self, *lowerCamelCase__, 
**lowerCamelCase__ ): requires_backends(self, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase_ ): '''simple docstring''' __lowerCamelCase : int = ["torch", "transformers", "onnx"] def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(self, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase_ ): '''simple docstring''' __lowerCamelCase : str = ["torch", "transformers", "onnx"] def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(self, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] ) @classmethod def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ): requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
662
"""simple docstring""" from __future__ import annotations def __snake_case ( UpperCamelCase__ ) -> list[int]: # This function is recursive """simple docstring""" A = len(UpperCamelCase__ ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else A = array[0] A = False A = 1 A = [] while not is_found and i < array_length: if array[i] < pivot: A = True A = [element for element in array[i:] if element >= array[i]] A = longest_subsequence(UpperCamelCase__ ) if len(UpperCamelCase__ ) > len(UpperCamelCase__ ): A = temp_array else: i += 1 A = [element for element in array[1:] if element >= pivot] A = [pivot, *longest_subsequence(UpperCamelCase__ )] if len(UpperCamelCase__ ) > len(UpperCamelCase__ ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
690
0
"""Learning-rate schedulers with warmup (diffusers-style).

Every factory below returns a :class:`torch.optim.lr_scheduler.LambdaLR`
whose lambda yields a *multiplier* applied on top of the optimizer's
initial learning rate (``lr_init``), not an absolute learning rate.
"""
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names accepted by `get_scheduler`."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate: the multiplier is 1 at every step."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1
) -> LambdaLR:
    """Constant schedule preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            # Ramp linearly from 0 to 1 during warmup; max() guards division by zero.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(
    optimizer: Optimizer, step_rules: str, last_epoch: int = -1
) -> LambdaLR:
    """Piecewise-constant multiplier driven by a rule string.

    `step_rules` looks like ``"1:10,0.1:20,0.01"``: multiplier 1 until step 10,
    then 0.1 until step 20, then 0.01 forever (the trailing, colon-free entry
    is the final multiplier).
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past the last boundary: use the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup to 1, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
) -> LambdaLR:
    """Linear warmup, then cosine decay following `num_cycles` half-waves."""

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: int = 1,
    last_epoch: int = -1,
) -> LambdaLR:
    """Linear warmup, then cosine decay with `num_cycles` hard restarts to 1."""

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` resets the cosine phase at each restart boundary.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
) -> LambdaLR:
    """Linear warmup, then polynomial decay from `lr_init` down to `lr_end`.

    Raises:
        ValueError: if `lr_end` is not strictly smaller than the optimizer's
            initial learning rate.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Unified entry point: build any scheduler from its `SchedulerType` name.

    Validates that the arguments required by the chosen scheduler were
    actually supplied before dispatching to the matching factory.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]

    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
424
'''simple docstring''' import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class a : '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=9_9 , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=9 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_=8 , lowerCamelCase_=0.1 , lowerCamelCase_=0.002 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=None , lowerCamelCase_=None , ) -> Dict: _a : Optional[Any] = parent _a : str = batch_size _a : Dict = encoder_seq_length _a : List[Any] = decoder_seq_length # For common tests _a : int = self.decoder_seq_length _a : int = is_training _a : List[Any] = use_attention_mask _a : int = use_labels _a : Tuple = vocab_size _a : Tuple = hidden_size _a : Tuple = num_hidden_layers _a : Union[str, Any] = num_attention_heads _a : int = d_ff _a : Union[str, Any] = relative_attention_num_buckets _a : str = dropout_rate _a : List[str] = initializer_factor _a : List[str] = eos_token_id _a : List[str] = pad_token_id _a : Union[str, Any] = decoder_start_token_id _a : Any = None _a : Optional[Any] = decoder_layers def __UpperCamelCase ( self ) -> Union[str, Any]: return TaConfig.from_pretrained('google/umt5-base' ) def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , 
lowerCamelCase_=None , ) -> str: if attention_mask is None: _a : int = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _a : int = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _a : List[str] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase_ ) if decoder_head_mask is None: _a : List[str] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase_ ) if cross_attn_head_mask is None: _a : int = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __UpperCamelCase ( self ) -> Optional[int]: _a : Dict = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _a : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _a : List[str] = input_ids.clamp(self.pad_token_id + 1 ) _a : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 ) _a : str = self.get_config() _a : List[Any] = config.num_attention_heads _a : List[str] = self.prepare_inputs_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return config, input_dict def __UpperCamelCase ( self ) -> Any: _a , _a : Dict = self.prepare_config_and_inputs() return 
config, inputs_dict def __UpperCamelCase ( self ) -> str: return TaConfig( vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCamelCase ( self ) -> List[Any]: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Any: _a : Optional[Any] = UMTaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _a : str = model( input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , ) _a : List[Any] = model(input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ ) _a : Tuple = result.last_hidden_state _a : Optional[int] = result.past_key_values _a : Dict = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, 
self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(lowerCamelCase_ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Tuple: _a : Any = UMTaModel(config=lowerCamelCase_ ).get_decoder().to(lowerCamelCase_ ).eval() # first forward pass _a : Union[str, Any] = model(lowerCamelCase_ , use_cache=lowerCamelCase_ ) _a : List[str] = model(lowerCamelCase_ ) _a : Optional[Any] = model(lowerCamelCase_ , use_cache=lowerCamelCase_ ) self.parent.assertTrue(len(lowerCamelCase_ ) == len(lowerCamelCase_ ) ) self.parent.assertTrue(len(lowerCamelCase_ ) == len(lowerCamelCase_ ) + 1 ) _a , _a : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _a : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _a : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) _a : Any = model(lowerCamelCase_ )['last_hidden_state'] _a : Any = model(lowerCamelCase_ , past_key_values=lowerCamelCase_ )['last_hidden_state'] # select random slice _a : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach() _a : int = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , ) -> Any: _a : Optional[Any] = UMTaModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).half().eval() _a : Tuple = model(**lowerCamelCase_ )['last_hidden_state'] 
self.parent.assertFalse(torch.isnan(lowerCamelCase_ ).any().item() ) @require_torch class a ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase : Union[str, Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) __lowerCAmelCase : Optional[int] = (UMTaForConditionalGeneration,) if is_torch_available() else () __lowerCAmelCase : Any = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = False __lowerCAmelCase : int = False __lowerCAmelCase : Union[str, Any] = True __lowerCAmelCase : int = True # The small UMT5 model needs higher percentages for CPU/MP tests __lowerCAmelCase : Any = [0.8, 0.9] def __UpperCamelCase ( self ) -> int: _a : Any = UMTaModelTester(self ) @unittest.skip('Test has a segmentation fault on torch 1.8.0' ) def __UpperCamelCase ( self ) -> List[str]: _a : int = self.model_tester.prepare_config_and_inputs() _a : List[str] = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( lowerCamelCase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=lowerCamelCase_ , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def __UpperCamelCase ( self ) -> Union[str, Any]: _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase_ ) def __UpperCamelCase ( self ) -> Any: _a : Union[str, Any] = ['encoder_attentions', 
'decoder_attentions', 'cross_attentions'] _a : Any = self.model_tester.prepare_config_and_inputs() _a : List[str] = config_and_inputs[0] _a : Optional[int] = UMTaForConditionalGeneration(lowerCamelCase_ ).eval() model.to(lowerCamelCase_ ) _a : Optional[int] = { 'head_mask': torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase_ ), 'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase_ ), 'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase_ ), } for attn_name, (name, mask) in zip(lowerCamelCase_ , head_masking.items() ): _a : Any = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _a : List[Any] = torch.ones( config.num_decoder_layers , config.num_heads , device=lowerCamelCase_ ) _a : Optional[Any] = model.generate( config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase_ , return_dict_in_generate=lowerCamelCase_ , **lowerCamelCase_ , ) # We check the state of decoder_attentions and cross_attentions just from the last step _a : Any = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' ) def __UpperCamelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( 'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' ) def __UpperCamelCase ( self ) -> Tuple: _a : str = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=lowerCamelCase_ ).to(lowerCamelCase_ ) _a : str = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=lowerCamelCase_ , legacy=lowerCamelCase_ ) _a : str = [ 'Bonjour monsieur <extra_id_0> bien <extra_id_1>.', 'No se como puedo <extra_id_0>.', 'This is the reason why we <extra_id_0> them.', 'The <extra_id_0> walks in <extra_id_1>, seats', 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.', ] _a : Optional[int] = tokenizer(lowerCamelCase_ , return_tensors='pt' , padding=lowerCamelCase_ ).input_ids # fmt: off _a : str = torch.tensor( [ [ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1], ] ) # fmt: on torch.testing.assert_allclose(lowerCamelCase_ , lowerCamelCase_ ) _a : Union[str, Any] = model.generate(input_ids.to(lowerCamelCase_ ) ) _a : int = [ '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. 
[eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>', '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', ] _a : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
424
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
46
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self ): '''simple docstring''' a_ : int = tempfile.mkdtemp() # fmt: off a_ : str = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on a_ : Tuple = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) a_ : Tuple = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] a_ : Tuple = {"""unk_token""": """<unk>"""} a_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCAmelCase_ ) ) a_ : int = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } a_ : int = os.path.join(self.tmpdirname , lowerCAmelCase_ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(lowerCAmelCase_ , 
lowerCAmelCase_ ) def _lowerCAmelCase ( self , **lowerCAmelCase_ ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def _lowerCAmelCase ( self , **lowerCAmelCase_ ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def _lowerCAmelCase ( self , **lowerCAmelCase_ ): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def _lowerCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] a_ : Tuple = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): '''simple docstring''' a_ : List[Any] = self.get_tokenizer() a_ : Any = self.get_rust_tokenizer() a_ : Any = self.get_image_processor() a_ : List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) a_ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase_ ) a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) a_ : List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase_ ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Optional[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a_ : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) a_ : Any = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 ) a_ : Union[str, Any] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase_ ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Any = self.get_image_processor() a_ : List[str] = self.get_tokenizer() a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) a_ : List[str] = self.prepare_image_inputs() a_ : Dict = image_processor(lowerCAmelCase_ , return_tensors="""np""" ) a_ : List[Any] = processor(images=lowerCAmelCase_ , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Optional[int] = self.get_image_processor() a_ : List[Any] = self.get_tokenizer() a_ : str = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) a_ : Optional[Any] = """lower newer""" a_ : List[str] = processor(text=lowerCAmelCase_ ) a_ : Union[str, Any] = tokenizer(lowerCAmelCase_ ) for key in encoded_tok.keys(): 
self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Tuple = self.get_image_processor() a_ : Union[str, Any] = self.get_tokenizer() a_ : Optional[Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) a_ : Dict = """lower newer""" a_ : Optional[int] = self.prepare_image_inputs() a_ : str = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase_ ): processor() def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Optional[Any] = self.get_image_processor() a_ : str = self.get_tokenizer() a_ : Union[str, Any] = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) a_ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a_ : int = processor.batch_decode(lowerCAmelCase_ ) a_ : int = tokenizer.batch_decode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Optional[int] = self.get_image_processor() a_ : List[str] = self.get_tokenizer() a_ : int = CLIPProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) a_ : Dict = """lower newer""" a_ : Optional[Any] = self.prepare_image_inputs() a_ : Any = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
577
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a__ : str = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase__ ( __A ): '''simple docstring''' _lowerCamelCase =["pixel_values"] def __init__( self : str , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BICUBIC , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : bool = True , **a__ : Tuple , ): super().__init__(**a__ ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 224} UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase = do_convert_rgb def __snake_case ( self : List[Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = 
PILImageResampling.BICUBIC , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ): UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCAmelCase = get_resize_output_image_size(a__ , size=size['''shortest_edge'''] , default_to_square=a__ ) return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ ) def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ): UpperCAmelCase = get_size_dict(a__ ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" ) return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ ) def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Union[int, float] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : str , ): return rescale(a__ , scale=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : List[Any] , ): return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ ) def __snake_case ( self : int , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : int = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : bool = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **a__ : Dict , ): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = size if size is not None else self.size 
UpperCAmelCase = get_size_dict(a__ , param_name='''size''' , default_to_square=a__ ) UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' , default_to_square=a__ ) UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase = make_list_of_images(a__ ) if not valid_images(a__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase = [convert_to_rgb(a__ ) for image in images] # All transformations expect numpy arrays. 
UpperCAmelCase = [to_numpy_array(a__ ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images] if do_center_crop: UpperCAmelCase = [self.center_crop(image=a__ , size=a__ ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=a__ , scale=a__ ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images] UpperCAmelCase = [to_channel_dimension_format(a__ , a__ ) for image in images] UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=a__ , tensor_type=a__ )
708
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a__ : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Dict , *a__ : Optional[int] , **a__ : List[Any] ): warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''' , a__ , ) super().__init__(*a__ , **a__ )
570
0
"""simple docstring""" from torch import nn def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> str: if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
159
"""simple docstring""" from collections import Counter from timeit import timeit def UpperCAmelCase ( _lowercase : str = "" , ) -> bool: """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2 def UpperCAmelCase ( _lowercase : str = "" ) -> bool: """simple docstring""" if len(_lowercase ) == 0: return True lowerCAmelCase_ = input_str.replace(''' ''' , '''''' ).lower() # character_freq_dict: Stores the frequency of every character in the input string lowerCAmelCase_ = {} for character in lower_case_input_str: lowerCAmelCase_ = character_freq_dict.get(_lowercase , 0 ) + 1 lowerCAmelCase_ = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def UpperCAmelCase ( _lowercase : str = "" ) -> None: """simple docstring""" print('''\nFor string = ''' , _lowercase , ''':''' ) print( '''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(_lowercase ) , '''\ttime =''' , timeit( '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , ) print( '''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(_lowercase ) , '''\ttime =''' , timeit( '''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , ) if __name__ == "__main__": lowercase_ = input( 'Enter string to determine if it can be rearranged as a palindrome or not: ' ).strip() benchmark(check_str) lowercase_ = can_string_be_rearranged_as_palindrome_counter(check_str) print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
552
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. 
# New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCAmelCase = 16 _lowerCAmelCase = 32 def UpperCamelCase ( _A , _A = 16 ) -> Dict: lowercase : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowercase : int = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(_A ): # max_length=None => use the model max length (it's actually the default) lowercase : int = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_A , max_length=_A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase : Dict = datasets.map( _A , batched=_A , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase : List[str] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_A ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase : List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase : List[str] = 16 elif accelerator.mixed_precision != "no": lowercase : Any = 8 else: lowercase : Optional[Any] = None return tokenizer.pad( _A , padding="""longest""" , max_length=_A , pad_to_multiple_of=_A , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowercase : List[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=_A , collate_fn=_A , batch_size=_A ) lowercase : Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=_A , collate_fn=_A , batch_size=_A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCAmelCase = mocked_dataloaders # noqa: F811 def UpperCamelCase ( _A , _A ) -> Optional[Any]: # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _A ) == "1": lowercase : Union[str, Any] = 2 # Initialize accelerator lowercase : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase : Tuple = config["""lr"""] lowercase : Union[str, Any] = int(config["""num_epochs"""] ) lowercase : List[str] = int(config["""seed"""] ) lowercase : Tuple = int(config["""batch_size"""] ) lowercase : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation lowercase : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowercase : Any = batch_size // MAX_GPU_BATCH_SIZE lowercase : List[str] = MAX_GPU_BATCH_SIZE set_seed(_A ) lowercase : str = get_dataloaders(_A , _A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase : str = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
lowercase : Dict = model.to(accelerator.device ) # Instantiate optimizer lowercase : int = AdamW(params=model.parameters() , lr=_A ) # Instantiate scheduler lowercase : Optional[Any] = get_linear_schedule_with_warmup( optimizer=_A , num_warmup_steps=100 , num_training_steps=(len(_A ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase : Dict = accelerator.prepare( _A , _A , _A , _A , _A ) # Now we train the model for epoch in range(_A ): model.train() for step, batch in enumerate(_A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowercase : List[str] = model(**_A ) lowercase : List[str] = outputs.loss lowercase : str = loss / gradient_accumulation_steps accelerator.backward(_A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() lowercase : Dict = 0 for step, batch in enumerate(_A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowercase : Union[str, Any] = model(**_A ) lowercase : Tuple = outputs.logits.argmax(dim=-1 ) lowercase : Dict = accelerator.gather((predictions, batch["""labels"""]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(_A ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples lowercase : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase : Tuple = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=_A , references=_A , ) lowercase : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _A ) def UpperCamelCase ( ) -> Optional[int]: lowercase : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=_A , default=_A , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowercase : List[Any] = parser.parse_args() lowercase : Union[str, Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(_A , _A ) if __name__ == "__main__": main()
709
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _lowerCAmelCase = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class UpperCamelCase (unittest.TestCase ): def __snake_case ( self :str , __magic_name__ :Path , __magic_name__ :Union[str, None] = None , __magic_name__ :Union[List[str], None] = None , __magic_name__ :Union[str, List[str], None] = None , __magic_name__ :bool = True , ) ->Optional[Any]: lowercase : Dict = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )] if identifier is not None: lowercase : Tuple = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__magic_name__ , __magic_name__ ): for n_ in n_identifier: lowercase : List[str] = [file for file in files if n_ not in file] else: lowercase : str = [file for file in files if n_identifier not in file] lowercase : List[str] = ignore_files or [] ignore_files.append("""__init__.py""" ) lowercase : Tuple = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __magic_name__ ) if only_modules: lowercase : List[Any] = file.split(""".""" )[0] try: lowercase : Dict = getattr(__magic_name__ , __magic_name__ ) lowercase : Dict = doctest.DocTestSuite(__magic_name__ ) lowercase : Optional[int] = unittest.TextTestRunner().run(__magic_name__ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f"""{module_identifier} is not a module.""" ) else: lowercase : List[str] = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def __snake_case ( self :Optional[Any] ) ->Dict: lowercase : int = Path("""src/transformers""" ) lowercase : Tuple = 
"""modeling""" lowercase : List[str] = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ ) def __snake_case ( self :str ) ->str: lowercase : Optional[int] = Path("""src/transformers""" ) lowercase : Tuple = """tokenization""" self.analyze_directory(__magic_name__ , identifier=__magic_name__ ) def __snake_case ( self :Optional[int] ) ->str: lowercase : Tuple = Path("""src/transformers""" ) lowercase : List[Any] = """configuration""" self.analyze_directory(__magic_name__ , identifier=__magic_name__ ) def __snake_case ( self :Tuple ) ->Any: lowercase : str = Path("""src/transformers""" ) lowercase : Optional[int] = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ ) def __snake_case ( self :List[str] ) ->Tuple: lowercase : List[str] = Path("""docs/source""" ) lowercase : int = ["""favicon.ico"""] self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
348
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ["MBartTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ["MBartTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ "MBART_PRETRAINED_MODEL_ARCHIVE_LIST", "MBartForCausalLM", "MBartForConditionalGeneration", "MBartForQuestionAnswering", "MBartForSequenceClassification", "MBartModel", "MBartPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ "TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ "FlaxMBartForConditionalGeneration", "FlaxMBartForQuestionAnswering", "FlaxMBartForSequenceClassification", "FlaxMBartModel", "FlaxMBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import 
MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
684
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 1_3_1_0_7_2, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, } def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2 def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: _UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2 _UpperCAmelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE ( lowercase): pass class __SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self : str , __UpperCamelCase : Optional[int] ): super().__init__() _UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 ) _UpperCAmelCase = deepcopy(self.diffusion ) _UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase ) def __lowerCamelCase ( _lowerCAmelCase ) -> int: _UpperCAmelCase = MODELS_MAP[model_name]["url"] 
os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } __lowerCAmelCase = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } __lowerCAmelCase = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } __lowerCAmelCase = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } __lowerCAmelCase = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." ): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: for key, value in ATTN_MAP.items(): if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ): return name.replace(_lowerCAmelCase , _lowerCAmelCase ) elif name.startswith(_lowerCAmelCase ): return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]: _UpperCAmelCase = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) _UpperCAmelCase = 0 if string.startswith("net.3." 
): depth += 1 _UpperCAmelCase = string[6:] elif string.startswith("net." ): _UpperCAmelCase = string[4:] while string.startswith("main.7." ): depth += 1 _UpperCAmelCase = string[7:] if string.startswith("main." ): _UpperCAmelCase = string[5:] # mid block if string[:2].isdigit(): _UpperCAmelCase = string[:2] _UpperCAmelCase = string[2:] else: _UpperCAmelCase = string[0] _UpperCAmelCase = string[1:] if depth == max_depth: _UpperCAmelCase = MID_NUM_TO_LAYER[layer_num] _UpperCAmelCase = "mid_block" elif depth > 0 and int(_lowerCAmelCase ) < 7: _UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''down_blocks.{depth}''' elif depth > 0 and int(_lowerCAmelCase ) > 7: _UpperCAmelCase = UP_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: _UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) _UpperCAmelCase = string_left[1:] if "resnets" in new_layer: _UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase ) elif "attentions" in new_layer: _UpperCAmelCase = convert_attn_naming(_lowerCAmelCase ) _UpperCAmelCase = new_string_left if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = prefix + "." + new_layer + "." + string_left else: _UpperCAmelCase = [prefix + "." + new_layer + "." 
+ s for s in string_left] return new_string def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue _UpperCAmelCase = rename(_lowerCAmelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _UpperCAmelCase = v return new_state_dict def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: if len(_lowerCAmelCase ) == 1: if len(v.shape ) == 3: # weight _UpperCAmelCase = v[:, :, 0] else: # bias _UpperCAmelCase = v else: # qkv matrices _UpperCAmelCase = v.shape[0] _UpperCAmelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: _UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _UpperCAmelCase = args.model_path.split("/" )[-1].split("." 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' _UpperCAmelCase = download(_lowerCAmelCase ) _UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"] _UpperCAmelCase = MODELS_MAP[model_name]["sample_size"] _UpperCAmelCase = Object() _UpperCAmelCase = sample_size _UpperCAmelCase = sample_rate _UpperCAmelCase = 0 _UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase ) _UpperCAmelCase = diffusers_model.state_dict() _UpperCAmelCase = DiffusionUncond(_lowerCAmelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] ) _UpperCAmelCase = orig_model.diffusion_ema.eval() _UpperCAmelCase = orig_model.state_dict() _UpperCAmelCase = rename_orig_weights(_lowerCAmelCase ) _UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}''' if key == "time_proj.weight": _UpperCAmelCase = value.squeeze() _UpperCAmelCase = value diffusers_model.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase = 100 _UpperCAmelCase = 33 _UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(_lowerCAmelCase ) _UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) _UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1] _UpperCAmelCase = get_crash_schedule(_lowerCAmelCase ) _UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(33 ) _UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios _UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} ) _UpperCAmelCase = generated.clamp(-1 , 1 ) _UpperCAmelCase = (generated - audio).abs().sum() _UpperCAmelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , _lowerCAmelCase ) print("Diff max" , _lowerCAmelCase ) assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") __lowerCAmelCase = parser.parse_args() main(args)
684
1
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowercase : def __init__( self , A_ , A_=99 , A_=13 , A_=16 , A_=7 , A_=True , A_=True , A_=True , A_=False , A_=True , A_=2 , A_=32 , A_=4 , A_=4 , A_=30 , A_=0 , A_=1 , A_=2 , A_=None , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = decoder_seq_length # For common tests UpperCamelCase = self.decoder_seq_length UpperCamelCase = is_training UpperCamelCase = use_attention_mask UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = d_model UpperCamelCase = decoder_layers UpperCamelCase = decoder_layers UpperCamelCase = decoder_ffn_dim UpperCamelCase = decoder_attention_heads UpperCamelCase = decoder_attention_heads UpperCamelCase = eos_token_id UpperCamelCase = bos_token_id UpperCamelCase = pad_token_id UpperCamelCase = decoder_start_token_id UpperCamelCase = use_cache UpperCamelCase = max_position_embeddings UpperCamelCase = None UpperCamelCase = decoder_seq_length UpperCamelCase = 2 UpperCamelCase = 1 def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_attention_mask: UpperCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) UpperCamelCase = TrOCRConfig( 
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = True UpperCamelCase = TrOCRDecoder(config=A_ ).to(A_ ).eval() UpperCamelCase = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass UpperCamelCase = model(A_ , use_cache=A_ ) UpperCamelCase = model(A_ ) UpperCamelCase = model(A_ , use_cache=A_ ) self.parent.assertTrue(len(A_ ) == len(A_ ) ) self.parent.assertTrue(len(A_ ) == len(A_ ) + 1 ) UpperCamelCase = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids UpperCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = model(A_ )['last_hidden_state'] UpperCamelCase = model(A_ , past_key_values=A_ )['last_hidden_state'] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() UpperCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(A_ , A_ , atol=1e-3 ) def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class lowercase ( 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : List[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowercase : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowercase : str = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} __lowercase : List[str] = True __lowercase : str = False def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=A_ ) UpperCamelCase = ConfigTester(self , config_class=A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" pass def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" pass def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" pass def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*A_ ) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" return @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" pass
3
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


# NOTE(review): pytest only collects functions whose names start with "test_";
# consider renaming `A` accordingly if this is meant to run under pytest's
# default collection rules.
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def A ( monkeypatch , dataset_size , input_in_memory_max_size ) -> None:
    """Check ``is_small_dataset`` against the configured ``IN_MEMORY_MAX_SIZE``.

    Bug fix: the original signature declared the same parameter name three
    times (a SyntaxError in Python) while the body referenced ``monkeypatch``,
    ``dataset_size`` and ``input_in_memory_max_size``, none of which were
    bound.  Parameter names now match the ``parametrize`` ids and the
    built-in ``monkeypatch`` fixture.

    A dataset is "small" iff a max size is configured (non-zero) and the
    dataset size is strictly below it.
    """
    if input_in_memory_max_size != "default":
        # Patch the module-level config knob for the duration of this test only.
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        # Library default: 0 means "never keep the dataset in memory".
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        # Either no size given (None) or the feature is disabled (max size 0).
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
3
1
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self )->Any: '''simple docstring''' A_ : List[Any] = mock.Mock() A_ : Any = 500 A_ : List[Any] = {} A_ : Optional[Any] = HTTPError A_ : Optional[int] = {} # Download this model to make sure it's in the cache. A_ : Dict = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=_SCREAMING_SNAKE_CASE ) as mock_head: A_ : List[Any] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def _snake_case ( self )->str: '''simple docstring''' A_ : Tuple = mock.Mock() A_ : Union[str, Any] = 500 A_ : Dict = {} A_ : Optional[Any] = HTTPError A_ : str = {} # Download this model to make sure it's in the cache. A_ : str = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=_SCREAMING_SNAKE_CASE ) as mock_head: A_ : str = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # This check we did call the fake head request mock_head.assert_called() def _snake_case ( self )->Tuple: '''simple docstring''' try: A_ : int = tempfile.mktemp() with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as f: http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , _SCREAMING_SNAKE_CASE ) A_ : int = AlbertTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) finally: os.remove(_SCREAMING_SNAKE_CASE ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('''tokenizer.json''' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('''tokenizer.json''' , '''wb''' ) as f: http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , _SCREAMING_SNAKE_CASE ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('''tokenizer.json''' ) def _snake_case ( self )->int: '''simple docstring''' A_ : Union[str, Any] = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' ) @is_staging_test class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" snake_case = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def _snake_case ( cls )->List[Any]: '''simple docstring''' A_ : Optional[int] = TOKEN HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @classmethod def _snake_case ( cls )->List[str]: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-tokenizer''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' ) except HTTPError: pass def _snake_case ( self )->str: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: A_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A_ : Any = BertTokenizer(_SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token ) A_ : Dict = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='''test-tokenizer''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-tokenizer''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A_ : Tuple = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def _snake_case ( self )->Any: '''simple docstring''' with 
tempfile.TemporaryDirectory() as tmp_dir: A_ : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A_ : List[Any] = BertTokenizer(_SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token ) A_ : Optional[int] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A_ : Optional[int] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def _snake_case ( self )->List[Any]: '''simple docstring''' CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = os.path.join(_SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A_ : int = CustomTokenizer(_SCREAMING_SNAKE_CASE ) # No fast custom tokenizer tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token ) A_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' ) # Fast and slow custom tokenizer 
CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A_ : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) A_ : Tuple = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token ) A_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' ) A_ : int = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=_SCREAMING_SNAKE_CASE , trust_remote_code=_SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self )->Optional[int]: '''simple docstring''' A_ : Optional[int] = Trie() trie.add('''Hello 友達''' ) self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) trie.add('''Hello''' ) trie.data self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) def _snake_case ( self )->Dict: '''simple docstring''' A_ : Optional[int] = Trie() self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] ) trie.add('''[CLS]''' ) 
trie.add('''extra_id_1''' ) trie.add('''extra_id_100''' ) self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] ) def _snake_case ( self )->Optional[Any]: '''simple docstring''' A_ : Union[str, Any] = Trie() trie.add('''A''' ) self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] ) self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] ) def _snake_case ( self )->int: '''simple docstring''' A_ : str = Trie() trie.add('''TOKEN]''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def _snake_case ( self )->List[Any]: '''simple docstring''' A_ : str = Trie() trie.add('''A''' ) trie.add('''P''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def _snake_case ( self )->str: '''simple docstring''' A_ : Tuple = Trie() trie.add('''AB''' ) trie.add('''B''' ) trie.add('''C''' ) self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] ) def _snake_case ( self )->List[str]: '''simple docstring''' A_ : List[Any] = Trie() trie.add('''ABC''' ) trie.add('''B''' ) trie.add('''CD''' ) self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] ) def _snake_case ( self )->str: '''simple docstring''' A_ : str = Trie() A_ : List[str] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(_SCREAMING_SNAKE_CASE , ['''AB''', '''C'''] )
590
def binomial_coefficient(n, r):
    """Return C(n, r), the number of ways to choose r items from n.

    Uses Pascal's rule, filling a single row of Pascal's triangle in place:
    O(n * r) time, O(r) extra space.

    Bug fixes: the original ``def`` declared the same parameter name twice
    (a SyntaxError), called ``min()`` on those broken names instead of
    ``min(i, r)``, and the module-level ``print`` invoked the undefined name
    ``binomial_coefficient`` — which is restored here as the function name.
    """
    c = [0 for _ in range(r + 1)]  # current row of Pascal's triangle
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # Update the row right-to-left so each c[j - 1] read is still the
        # previous row's value (in-place Pascal's rule).
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
590
1
"""Lazy import structure for the RoCBert model (transformers-style __init__)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Bug fix: both the base dict and the torch-only model list were assigned to
# an unrelated throwaway name while _LazyModule below consumed the undefined
# name `_import_structure`, so importing this package raised NameError.
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # No fast tokenizer is exposed for RoCBert; nothing extra to register.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Bug fix: this list previously rebound the throwaway name instead of
    # extending the import structure.
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Bug fix: this branch unconditionally re-raised
        # OptionalDependencyNotAvailable even when tokenizers IS available.
        # There is no fast RoCBert tokenizer, so there is nothing to import.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Bug fix: the lazy module was assigned to a throwaway name instead of
    # replacing this module in sys.modules, so lazy loading never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
368
'''simple docstring''' from __future__ import annotations import numpy as np def snake_case_ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' return np.maximum(0 , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
368
1
"""Deprecated feature-extractor alias for the Donut image processor."""
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    """Deprecated: use :class:`DonutImageProcessor` instead.

    Bug fixes: the class previously inherited from the undefined name
    ``__UpperCamelCase`` (NameError at import); ``__init__`` declared its
    var-positional and var-keyword parameters under one duplicate name
    (a SyntaxError); and the deprecation warning was emitted without the
    ``FutureWarning`` category.
    """

    def __init__(self, *args, **kwargs):
        # Warn once per call site that this shim will disappear in v5.
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
672
"""Lazy import structure for the SwiftFormer model (transformers-style __init__)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Bug fix: both the base dict and the torch-only model list were assigned to
# an unrelated throwaway name while _LazyModule below consumed the undefined
# name `_import_structure`, so importing this package raised NameError; the
# second assignment also replaced the dict instead of extending it.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Bug fix: the lazy module was assigned to a throwaway name instead of
    # replacing this module in sys.modules, so lazy loading never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
672
1
"""Newton-Raphson root finding driven by sympy symbolic differentiation."""
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of ``function`` (an expression in ``variable``) near ``starting_point``.

    The derivative is obtained symbolically with :func:`sympy.diff`, and the
    modified Newton step ``x - m * f(x) / f'(x)`` is iterated until two
    consecutive guesses differ by less than ``precision``.  ``multiplicity``
    (m) restores quadratic convergence at repeated roots.

    Raises ZeroDivisionError if the derivative vanishes at the current guess.

    Bug fix: the original signature repeated one parameter name five times
    (a SyntaxError) and the ``__main__`` block called the undefined name
    ``newton_raphson``; both the function name and its parameters are
    restored from the body's usage.
    """
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, variable))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses.
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    # Find fourth Root of 5
    print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f'{newton_raphson("log(y) - 1", 2, variable="y")}',
    )
    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f'{newton_raphson("exp(x) - 1", 10, precision=0.0_0_5)}',
    )
    # Find root of cos(x)
    print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
257
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) __a = 'hf-internal-testing/tiny-random-bert' __a = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert') __a = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6' class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" _UpperCAmelCase : int = cached_file(lowerCAmelCase__ , lowerCAmelCase__ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(lowerCAmelCase__ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ) ) with open(os.path.join(lowerCAmelCase__ , "refs" , "main" ) ) as f: _UpperCAmelCase : int = f.read() self.assertEqual(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "snapshots" , lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertTrue(os.path.isfile(lowerCAmelCase__ ) ) # File is cached at the same place the second time. _UpperCAmelCase : Dict = cached_file(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Using a specific revision to test the full commit hash. 
_UpperCAmelCase : Optional[int] = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , revision="9b8c223" ) self.assertEqual(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "snapshots" , lowerCAmelCase__ , lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid model identifier" ): _UpperCAmelCase : Any = cached_file("tiny-random-bert" , lowerCAmelCase__ ) with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid git identifier" ): _UpperCAmelCase : List[Any] = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , revision="aaaa" ) with self.assertRaisesRegex(lowerCAmelCase__ , "does not appear to have a file named" ): _UpperCAmelCase : Union[str, Any] = cached_file(lowerCAmelCase__ , "conf" ) def _lowerCAmelCase ( self : str ) -> int: """simple docstring""" with self.assertRaisesRegex(lowerCAmelCase__ , "does not appear to have a file named" ): _UpperCAmelCase : Dict = cached_file(lowerCAmelCase__ , "conf" ) with open(os.path.join(lowerCAmelCase__ , "refs" , "main" ) ) as f: _UpperCAmelCase : Dict = f.read() self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase__ , ".no_exist" , lowerCAmelCase__ , "conf" ) ) ) _UpperCAmelCase : Optional[int] = cached_file(lowerCAmelCase__ , "conf" , _raise_exceptions_for_missing_entries=lowerCAmelCase__ ) self.assertIsNone(lowerCAmelCase__ ) _UpperCAmelCase : str = cached_file(lowerCAmelCase__ , "conf" , local_files_only=lowerCAmelCase__ , _raise_exceptions_for_missing_entries=lowerCAmelCase__ ) self.assertIsNone(lowerCAmelCase__ ) _UpperCAmelCase : Any = mock.Mock() _UpperCAmelCase : str = 5_0_0 _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : Tuple = HTTPError _UpperCAmelCase : Optional[int] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head: _UpperCAmelCase : Optional[Any] = cached_file(lowerCAmelCase__ , "conf" , _raise_exceptions_for_connection_errors=lowerCAmelCase__ ) self.assertIsNone(lowerCAmelCase__ ) # This check we did call the fake head request mock_head.assert_called() def _lowerCAmelCase ( self : int ) -> Any: """simple docstring""" self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__ ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__ ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid model identifier" ): get_file_from_repo("bert-base-case" , lowerCAmelCase__ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid git identifier" ): get_file_from_repo("bert-base-cased" , lowerCAmelCase__ , revision="ahaha" ) _UpperCAmelCase : Optional[Any] = get_file_from_repo("bert-base-cased" , lowerCAmelCase__ ) # The name is the cached name which is not very easy to test, so instead we load the content. _UpperCAmelCase : List[Any] = json.loads(open(lowerCAmelCase__ , "r" ).read() ) self.assertEqual(config["hidden_size"] , 7_6_8 ) def _lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase : Union[str, Any] = Path(lowerCAmelCase__ ) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(lowerCAmelCase__ , "a.txt" ) , str(lowerCAmelCase__ ) ) self.assertIsNone(get_file_from_repo(lowerCAmelCase__ , "b.txt" ) )
257
1
"""2-D max pooling and average pooling over square matrices (NumPy)."""
import numpy as np


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over ``arr`` with step ``stride``
    and return the matrix of per-window maxima.

    Raises ValueError if ``arr`` is not square.

    Bug fixes (applies to both pooling functions): both were defined under
    the same placeholder name, so the second silently shadowed the first;
    every local had been collapsed to one letter, leaving ``updated_arr``,
    ``mat_i``, ``mat_j`` and the output-shape variable undefined (NameError);
    and the ``__main__`` block called ``maxpooling``/``avgpooling``, which
    did not exist.  Names are reconstructed from the body's own references.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0  # row cursor in the input
    j = 0  # column cursor in the input
    mat_i = 0  # row cursor in the output
    mat_j = 0  # column cursor in the output

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over ``arr`` with step ``stride``
    and return the matrix of per-window (truncated-int) averages.

    Raises ValueError if ``arr`` is not square.  See ``maxpooling`` for the
    bug fixes applied to both functions.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0  # row cursor in the input
    j = 0  # column cursor in the input
    mat_i = 0  # row cursor in the output
    mat_j = 0  # column cursor in the output

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window (truncated toward zero,
            # matching the original int() conversion)
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)

    # PIL is only needed for this demo, so import it lazily here instead of
    # at module top (keeps the pooling functions importable without Pillow).
    from PIL import Image

    # Loading the image
    image = Image.open('path_to_image')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
305
# Conversational pipeline module: a Conversation state container plus a
# ConversationalPipeline that turns conversation history into model inputs,
# generates a reply, and appends it back onto the conversation.
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch

# Module-level logger (mangled name; the methods below read ``logger``).
_lowercase : Optional[Any] =logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE_ :
    """State of a chat: past user inputs, generated replies, and the
    not-yet-processed user input.

    NOTE(review): this block is machine-mangled — every ``__init__`` parameter
    is named ``SCREAMING_SNAKE_CASE__`` (duplicate argument names are a
    SyntaxError) while the body reads ``conversation_id`` / ``past_user_inputs``
    / ``generated_responses`` / ``text``, and every assignment target is the
    bare name ``A`` where instance attributes are clearly intended: the other
    methods read ``self.uuid``, ``self.past_user_inputs``,
    ``self.generated_responses`` and ``self.new_user_input``.  Confirm against
    the upstream transformers source before relying on it.
    """

    def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : uuid.UUID = None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ) -> Any:
        # Fall back to a fresh UUID / empty histories when nothing is supplied.
        # (``uuid.uuida`` is presumably a mangled ``uuid.uuid4`` — confirm.)
        if not conversation_id:
            A : int =uuid.uuida()
        if past_user_inputs is None:
            A : Tuple =[]
        if generated_responses is None:
            A : Any =[]
        A : uuid.UUID =conversation_id
        A : List[str] =past_user_inputs
        A : List[str] =generated_responses
        A : Optional[str] =text

    def __eq__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
        # Equal when sharing a UUID, or when the full visible state matches.
        if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool = False ) -> Any:
        # add_user_input: queue new text; warn (and optionally overwrite) if
        # an unprocessed input is already pending.
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".' )
                A : Union[str, Any] =text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
        else:
            A : List[str] =text

    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
        # mark_processed: move the pending input into the processed history.
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        A : List[str] =None

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
        # append_response: record a model-generated reply.
        self.generated_responses.append(SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
        # iter_texts: yield (is_user, text) pairs in chronological order,
        # ending with the pending user input if any.
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__( self : Optional[Any] ) -> List[str]:
        # Human-readable transcript, one "user/bot >> text" line per turn.
        A : int =f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            A : List[str] ='user' if is_user else 'bot'
            output += f'{name} >> {text} \n'
        return output


@add_end_docstrings(
    lowerCAmelCase_ , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
    """Pipeline that runs multi-turn conversational generation.

    NOTE(review): same mangling pattern as above — assignment targets are the
    bare name ``A`` where locals/attributes are intended, and the bodies read
    names (``min_length_for_response``, ``generate_kwargs``, ``conversation``,
    ``input_ids``, ``model_inputs``, ``output_ids`` …) that the mangled
    parameter lists never bind.
    """

    def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
        super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        # Generation needs a pad token; fall back to EOS when none is set.
        if self.tokenizer.pad_token_id is None:
            A : Optional[int] =self.tokenizer.eos_token

    def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : str ) -> int:
        # _sanitize_parameters: split pipeline kwargs into the three
        # per-stage dicts expected by the base Pipeline.
        A : List[Any] ={}
        A : str ={}
        A : Any ={}
        if min_length_for_response is not None:
            A : Dict =min_length_for_response
        if minimum_tokens is not None:
            A : Union[str, Any] =minimum_tokens
        if "max_length" in generate_kwargs:
            A : str =generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            A : Dict =clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(SCREAMING_SNAKE_CASE__ )
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : int , SCREAMING_SNAKE_CASE__ : Union[Conversation, List[Conversation]] , SCREAMING_SNAKE_CASE__ : Tuple=0 , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
        # Unwrap a single-element result list so one Conversation in gives
        # one Conversation back (not a list of one).
        A : int =super().__call__(SCREAMING_SNAKE_CASE__ , num_workers=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) == 1:
            return outputs[0]
        return outputs

    def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Conversation , SCREAMING_SNAKE_CASE__ : Optional[int]=32 ) -> Dict[str, Any]:
        # preprocess: validate the Conversation and encode it to input_ids
        # in the framework-appropriate tensor type.
        if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            A : str =self.tokenizer._build_conversation_input_ids(SCREAMING_SNAKE_CASE__ )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            A : Any =self._legacy_parse_and_tokenize(SCREAMING_SNAKE_CASE__ )
        if self.framework == "pt":
            A : str =torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            A : List[Any] =tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=10 , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
        # _forward: trim over-long history so at least ``minimum_tokens`` of
        # budget remain for the reply, then run generate().
        A : List[Any] =generate_kwargs.get('max_length' , self.model.config.max_length )

        A : Any =model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
            A : Dict =max_length - minimum_tokens
            A : Dict =model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                A : int =model_inputs['attention_mask'][:, -trim:]
        A : Union[str, Any] =model_inputs.pop('conversation' )
        A : Optional[int] =max_length
        A : str =self.model.generate(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        # encoder-decoder outputs start with one decoder start token; decoder-only
        # outputs echo the n prompt tokens first.
        if self.model.config.is_encoder_decoder:
            A : str =1
        else:
            A : Any =n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str=True ) -> Optional[Any]:
        # postprocess: decode the reply and fold it back into the Conversation.
        A : Any =model_outputs['output_ids']
        A : Dict =self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ , )
        A : int =model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(SCREAMING_SNAKE_CASE__ )
        return conversation

    def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Conversation ) -> Dict:
        # _legacy_parse_and_tokenize: encode each turn followed by EOS (when
        # the tokenizer has one), keeping only the trailing model_max_length ids.
        A : List[str] =self.tokenizer.eos_token_id
        A : str =[]
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) )
        if len(SCREAMING_SNAKE_CASE__ ) > self.tokenizer.model_max_length:
            A : Dict =input_ids[-self.tokenizer.model_max_length :]
        return input_ids
305
1
'''Lazy-import scaffolding for the MLuke tokenizer (sentencepiece-gated).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# NOTE(review): machine-mangled block — both assignment targets below are the
# same name ``__A``, while ``_LazyModule`` is handed ``_import_structure``,
# which is never bound.  Presumably these were
# ``_import_structure = {}`` and
# ``_import_structure["tokenization_mluke"] = ["MLukeTokenizer"]`` —
# confirm against the upstream transformers source.
__A : Optional[Any] = {}

try:
    # Only advertise the tokenizer when sentencepiece is installed.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[str] = ["""MLukeTokenizer"""]


if TYPE_CHECKING:
    # Static type checkers see the real import; runtime stays lazy.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    # Replace this module with a lazy proxy that imports on first access.
    __A : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
187
'''Lazy-import scaffolding for DeBERTa (config, tokenizers, PT/TF models).'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# NOTE(review): machine-mangled block — every assignment target below is the
# same name ``__A``, while ``_LazyModule`` is handed ``_import_structure``,
# which is never bound.  Presumably the first assignment was
# ``_import_structure = {...}`` and the later ones filled in optional keys
# (tokenizers / torch / tf) — confirm against the upstream transformers source.
__A : Tuple = {
    """configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
    """tokenization_deberta""": ["""DebertaTokenizer"""],
}

try:
    # Fast tokenizer requires the `tokenizers` package.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : int = ["""DebertaTokenizerFast"""]

try:
    # PyTorch model classes.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : str = [
        """DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DebertaForMaskedLM""",
        """DebertaForQuestionAnswering""",
        """DebertaForSequenceClassification""",
        """DebertaForTokenClassification""",
        """DebertaModel""",
        """DebertaPreTrainedModel""",
    ]

try:
    # TensorFlow model classes.
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Union[str, Any] = [
        """TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFDebertaForMaskedLM""",
        """TFDebertaForQuestionAnswering""",
        """TFDebertaForSequenceClassification""",
        """TFDebertaForTokenClassification""",
        """TFDebertaModel""",
        """TFDebertaPreTrainedModel""",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime stays lazy.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on first access.
    __A : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
187
1
'''Modular binary exponentiation, plus a modular-division demo.'''


def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute ``(a ** n) % mod`` using O(log n) multiplications.

    Args:
        a: base.
        n: non-negative integer exponent.
        mod: modulus, > 0.

    Returns:
        ``(a ** n) % mod``.

    >>> binary_exponentiation(2, 10, 1000)
    24
    """
    if n == 0:
        return 1
    if n % 2 == 1:
        # odd exponent: peel one factor of ``a`` off
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    # even exponent: square the half power; ``//`` keeps ``n`` an int
    # (the original used ``/``, silently recursing on floats)
    half = binary_exponentiation(a, n // 2, mod)
    return (half * half) % mod


# a prime number
p = 7_01

a = 10_00_00_00_00
b = 10

if __name__ == "__main__":
    # Modular division a/b (mod p) via Fermat's little theorem:
    # b**(p-2) is the modular inverse of b.  (The original compared against
    # float division, which is meaningless modulo p.)
    print((a * binary_exponentiation(b, p - 2, p)) % p == (a * pow(b, p - 2, p)) % p)
330
'''Prime factorisation by trial division.'''
from __future__ import annotations


def a__(_SCREAMING_SNAKE_CASE: int) -> list[int]:
    """Return the prime factors of a positive integer in ascending order.

    Trial division up to ``sqrt(n)``: O(sqrt(n)).

    >>> a__(12)
    [2, 2, 3]
    >>> a__(1)
    []
    >>> a__(97)
    [97]
    """
    # bind the parameter to the name the algorithm uses
    # (the mangled original read an unbound ``n``)
    n = _SCREAMING_SNAKE_CASE
    i = 2
    factors: list[int] = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            # record the divisor ``i`` (the original appended the input value)
            factors.append(i)
    if n > 1:
        # whatever remains above sqrt of the original value is prime
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
0
# ``__all__`` so ``from ... import *`` also exports the underscore-prefixed name.
__all__ = ["MOD_ADLER", "__magic_name__"]

# Adler-32 modulus: the largest prime below 2**16 (RFC 1950).
# (The original bound this value to a different name than the one read below.)
MOD_ADLER = 6_5_5_2_1


def __magic_name__(plain_text: str) -> int:
    """Return the Adler-32 checksum of ``plain_text``.

    ``a`` accumulates the byte sum, ``b`` the running sum of ``a``; the
    result packs ``b`` into the high 16 bits and ``a`` into the low 16.

    >>> __magic_name__("Wikipedia")
    300286872
    >>> __magic_name__("")
    1
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
707
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return ``P(X = successes)`` for ``X ~ Binomial(trials, prob)``.

    >>> binomial_distribution(2, 4, 0.75)
    0.2109375

    Raises:
        ValueError: if the counts are not non-negative integers, if
            ``successes > trials``, or if ``prob`` is outside (0, 1).
    """
    # type-check first so the numeric comparisons below only ever see ints
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
414
0
'''Accelerate example: fine-tune bert-base-cased on GLUE MRPC on CPU/GPU/TPU.'''
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

# NOTE(review): machine-mangled file — both constants below are assigned to
# the same name ``UpperCAmelCase_`` (the second clobbers the first), function
# bodies assign everything to ``UpperCAmelCase__``, and the code later reads
# names that are never bound (``tokenizer``, ``datasets``, ``accelerator``,
# ``MAX_GPU_BATCH_SIZE``, ``training_function`` …).  Presumably these were
# ``MAX_GPU_BATCH_SIZE = 16`` / ``EVAL_BATCH_SIZE = 32`` — confirm against
# the upstream accelerate example script.
UpperCAmelCase_ = 1_6
UpperCAmelCase_ = 3_2


def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int = 16 ):
    '''Build the train/eval DataLoaders for GLUE MRPC with bert-base-cased.'''
    UpperCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    UpperCAmelCase__ = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(SCREAMING_SNAKE_CASE__ : Tuple ):
        # max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        UpperCAmelCase__ = datasets.map(
            SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    UpperCAmelCase__ = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        UpperCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            UpperCAmelCase__ = 16
        elif accelerator.mixed_precision != "no":
            UpperCAmelCase__ = 8
        else:
            UpperCAmelCase__ = None

        return tokenizer.pad(
            SCREAMING_SNAKE_CASE__ , padding="""longest""" , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" , )

    # Instantiate dataloaders.
    UpperCAmelCase__ = DataLoader(
        tokenized_datasets["""train"""] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , drop_last=SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase__ = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , drop_last=(accelerator.mixed_precision == """fp8""") , )

    return train_dataloader, eval_dataloader


def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ):
    '''Train and evaluate the model with the given config dict and CLI args.'''
    UpperCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    UpperCAmelCase__ = config["""lr"""]
    UpperCAmelCase__ = int(config["""num_epochs"""] )
    UpperCAmelCase__ = int(config["""seed"""] )
    UpperCAmelCase__ = int(config["""batch_size"""] )
    UpperCAmelCase__ = evaluate.load("""glue""" , """mrpc""" )

    # If the batch size is too big we use gradient accumulation
    UpperCAmelCase__ = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        UpperCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE
        UpperCAmelCase__ = MAX_GPU_BATCH_SIZE

    set_seed(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase__ , UpperCAmelCase__ = get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    UpperCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=SCREAMING_SNAKE_CASE__ )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    UpperCAmelCase__ = model.to(accelerator.device )

    # Instantiate optimizer
    UpperCAmelCase__ = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ )

    # Instantiate scheduler
    UpperCAmelCase__ = get_linear_schedule_with_warmup(
        optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = accelerator.prepare(
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    # Now we train the model
    for epoch in range(SCREAMING_SNAKE_CASE__ ):
        model.train()
        for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            UpperCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ )
            UpperCAmelCase__ = outputs.loss
            UpperCAmelCase__ = loss / gradient_accumulation_steps
            accelerator.backward(SCREAMING_SNAKE_CASE__ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                UpperCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ )
            UpperCAmelCase__ = outputs.logits.argmax(dim=-1 )
            UpperCAmelCase__ , UpperCAmelCase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , )

        UpperCAmelCase__ = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE__ )


def _UpperCamelCase ( ):
    '''Parse CLI flags and launch training with default MRPC hyper-parameters.'''
    UpperCAmelCase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    UpperCAmelCase__ = parser.parse_args()
    UpperCAmelCase__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    main()
603
'''Longest common subsequence of two strings via dynamic programming.'''


def longest_common_subsequence(x: str, y: str) -> tuple[int, str]:
    """Return the LCS length of ``x`` and ``y`` and one witnessing subsequence.

    O(m*n) time and space DP table, then an O(m+n) backtrack.

    >>> longest_common_subsequence("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    >>> longest_common_subsequence("", "abc")
    (0, '')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # dp[i][j] = LCS length of x[:i] and y[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match)

    # backtrack from the bottom-right corner to reconstruct one LCS
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if dp[i][j] == dp[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif dp[i][j] == dp[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return dp[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
603
1
'''Unidirectional and bidirectional breadth-first search on a small 2D grid.'''
from __future__ import annotations

import time

# NOTE(review): machine-mangled block — the three module constants below are
# all assigned to the same name ``lowercase__`` (each clobbers the previous),
# all three classes are named ``__SCREAMING_SNAKE_CASE`` and every method
# parameter repeats ``__UpperCamelCase`` (duplicate argument names are a
# SyntaxError), while the bodies read the intended names (``pos_x``, ``grid``,
# ``delta``, ``Node``, ``BreadthFirstSearch``, ``init``, ``goal`` …).
# Presumably this was a Path alias / grid / delta plus
# Node / BreadthFirstSearch / BidirectionalBreadthFirstSearch — confirm
# against the upstream source before relying on it.
lowercase__ = list[tuple[int, int]]

lowercase__ = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

lowercase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class __SCREAMING_SNAKE_CASE :
    # Grid cell: its coordinates, the goal coordinates, and a parent link
    # used to retrace the path once the goal is reached.
    def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
        _a = pos_x
        _a = pos_y
        _a = (pos_y, pos_x)
        _a = goal_x
        _a = goal_y
        _a = parent


class __SCREAMING_SNAKE_CASE :
    # Plain FIFO breadth-first search from ``start`` to ``goal``.
    def __init__( self , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
        # start/goal are given as (y, x); Node takes (x, y, goal_x, goal_y, parent)
        _a = Node(start[1] , start[0] , goal[1] , goal[0] , __UpperCamelCase )
        _a = Node(goal[1] , goal[0] , goal[1] , goal[0] , __UpperCamelCase )

        _a = [self.start]
        _a = False

    def a_ ( self ) -> Path | None:
        # Pop nodes FIFO until the target is found or the queue drains.
        while self.node_queue:
            _a = self.node_queue.pop(0 )

            if current_node.pos == self.target.pos:
                _a = True
                return self.retrace_path(__UpperCamelCase )

            _a = self.get_successors(__UpperCamelCase )

            for node in successors:
                self.node_queue.append(__UpperCamelCase )

        if not self.reached:
            return [self.start.pos]
        return None

    def a_ ( self , __UpperCamelCase ) -> list[Node]:
        # Expand the four neighbours that are in bounds and not obstacles.
        _a = []
        for action in delta:
            _a = parent.pos_x + action[1]
            _a = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCamelCase ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(__UpperCamelCase , __UpperCamelCase , self.target.pos_y , self.target.pos_x , __UpperCamelCase ) )
        return successors

    def a_ ( self , __UpperCamelCase ) -> Path:
        # Walk parent links back to the root, then reverse into start->node order.
        _a = node
        _a = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            _a = current_node.parent
        path.reverse()
        return path


class __SCREAMING_SNAKE_CASE :
    # BFS from both ends simultaneously; joins the two half-paths when the
    # frontiers meet.
    def __init__( self , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
        _a = BreadthFirstSearch(__UpperCamelCase , __UpperCamelCase )
        _a = BreadthFirstSearch(__UpperCamelCase , __UpperCamelCase )
        _a = False

    def a_ ( self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            _a = self.fwd_bfs.node_queue.pop(0 )
            _a = self.bwd_bfs.node_queue.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                _a = True
                return self.retrace_bidirectional_path(
                    __UpperCamelCase , __UpperCamelCase )

            _a = current_bwd_node
            _a = current_fwd_node

            _a = {
                self.fwd_bfs: self.fwd_bfs.get_successors(__UpperCamelCase ),
                self.bwd_bfs: self.bwd_bfs.get_successors(__UpperCamelCase ),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(__UpperCamelCase )

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def a_ ( self , __UpperCamelCase , __UpperCamelCase ) -> Path:
        # Forward half + reversed backward half (popping the duplicated meet node).
        _a = self.fwd_bfs.retrace_path(__UpperCamelCase )
        _a = self.bwd_bfs.retrace_path(__UpperCamelCase )
        bwd_path.pop()
        bwd_path.reverse()
        _a = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    lowercase__ = (0, 0)
    lowercase__ = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    lowercase__ = time.time()
    lowercase__ = BreadthFirstSearch(init, goal)
    lowercase__ = bfs.search()
    lowercase__ = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    lowercase__ = time.time()
    lowercase__ = BidirectionalBreadthFirstSearch(init, goal)
    lowercase__ = bd_bfs.search()
    lowercase__ = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
276
'''simple docstring''' lowercase__ = 65_521 def __UpperCamelCase ( __lowerCamelCase : str ) -> int: '''simple docstring''' _a = 1 _a = 0 for plain_chr in plain_text: _a = (a + ord(__lowerCamelCase )) % MOD_ADLER _a = (b + a) % MOD_ADLER return (b << 16) | a
276
1
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the decryption under every key.

    Only uppercase ASCII letters are shifted; every other character is
    copied through unchanged.

    Args:
        message: the encrypted text (conventionally upper-cased by ``main``).
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                # locate the current character (the mangled original passed
                # the whole message to find(), which always returned -1)
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}")


def main() -> None:
    """Prompt for an encrypted message and print all candidate decryptions."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
79
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger (mangled target name).
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)

# Map of released REALM checkpoints to their hosted config files.
# NOTE(review): the "-openqa" embedder URL below contains "/aresolve/" where
# the sibling entries use "/resolve/" — looks like a typo in the URL string;
# confirm against the upstream transformers source (a doc-only edit must not
# change runtime strings).
SCREAMING_SNAKE_CASE__ : Tuple = {
    """google/realm-cc-news-pretrained-embedder""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-encoder""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-scorer""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-openqa""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
    ),
    """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
    """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
    """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
    """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class UpperCAmelCase_ ( __lowerCamelCase ):
    """Configuration for REALM models (embedder/encoder/scorer/reader/openqa).

    NOTE(review): machine-mangled block — every ``__init__`` parameter is
    named ``_lowerCAmelCase`` (duplicate argument names are a SyntaxError)
    while the body reads ``vocab_size``, ``hidden_size`` … and every
    assignment target is ``UpperCAmelCase__`` where ``self.<attr>`` is
    clearly intended.  The base-class reference is likewise mangled
    (presumably ``PretrainedConfig``).  Confirm against the upstream
    transformers source.
    """

    # model_type identifier (mangled attribute name; upstream: model_type = "realm")
    __lowerCamelCase = 'realm'

    def __init__( self , _lowerCAmelCase=30522 , _lowerCAmelCase=768 , _lowerCAmelCase=128 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=8 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu_new" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=1e-12 , _lowerCAmelCase=256 , _lowerCAmelCase=10 , _lowerCAmelCase=1e-3 , _lowerCAmelCase=5 , _lowerCAmelCase=320 , _lowerCAmelCase=13353718 , _lowerCAmelCase=5000 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , **_lowerCAmelCase , ):
        # Forward special-token ids and remaining kwargs to PretrainedConfig.
        super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )

        # Common config
        UpperCAmelCase__ : List[Any] = vocab_size
        UpperCAmelCase__ : Dict = max_position_embeddings
        UpperCAmelCase__ : Any = hidden_size
        UpperCAmelCase__ : str = retriever_proj_size
        UpperCAmelCase__ : Tuple = num_hidden_layers
        UpperCAmelCase__ : List[str] = num_attention_heads
        UpperCAmelCase__ : List[Any] = num_candidates
        UpperCAmelCase__ : str = intermediate_size
        UpperCAmelCase__ : str = hidden_act
        UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
        UpperCAmelCase__ : str = attention_probs_dropout_prob
        UpperCAmelCase__ : Union[str, Any] = initializer_range
        UpperCAmelCase__ : Any = type_vocab_size
        UpperCAmelCase__ : Optional[Any] = layer_norm_eps

        # Reader config
        UpperCAmelCase__ : str = span_hidden_size
        UpperCAmelCase__ : Union[str, Any] = max_span_width
        UpperCAmelCase__ : List[str] = reader_layer_norm_eps
        UpperCAmelCase__ : Dict = reader_beam_size
        UpperCAmelCase__ : Union[str, Any] = reader_seq_len

        # Retrieval config
        UpperCAmelCase__ : List[Any] = num_block_records
        UpperCAmelCase__ : List[Any] = searcher_beam_size
79
1
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetVaConfig,
    MobileNetVaForImageClassification,
    MobileNetVaImageProcessor,
    load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a checkpoint name like 'mobilenet_v1_1.0_224'."""
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # shift all ImageNet ids by one to make room for the "background" class
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TensorFlow MobileNetV1 checkpoint to a 🤗 PyTorch model and save it."""
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
713
from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class _SCREAMING_SNAKE_CASE : pass
447
0
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): snake_case_ : Optional[Any] = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: snake_case_ : Dict = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } def __a ( __UpperCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase_ : int = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowerCamelCase_ : List[str] = numpy_to_pil(__UpperCAmelCase ) return images def __a ( __UpperCAmelCase : Any ) -> Tuple: """simple docstring""" if images.ndim == 3: lowerCamelCase_ : Optional[Any] = images[None, ...] lowerCamelCase_ : Optional[int] = (images * 255).round().astype("uint8" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowerCamelCase_ : Optional[Any] = [Image.fromarray(image.squeeze() , mode="L" ) for image in images] else: lowerCamelCase_ : Optional[Any] = [Image.fromarray(__UpperCAmelCase ) for image in images] return pil_images
488
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    """Tests for RagRetriever over canonical, custom and legacy FAISS indexes."""

    def setUp(self):
        # Scratch directory holding the dummy DPR and BART tokenizer files.
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        """Two-row dataset with a flat inner-product FAISS index over 'embeddings'."""
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        # Patch the dataset loader so the retriever uses the dummy dataset.
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_path = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_path, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
488
1
'''simple docstring''' def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(UpperCAmelCase__ ) ) def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' if index == len(UpperCAmelCase__ ): return True # Recursive Step for i in range(UpperCAmelCase__ ): if valid_coloring(graph[index] , UpperCAmelCase__ , UpperCAmelCase__ ): # Color current vertex a_ =i # Validate coloring if util_color(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , index + 1 ): return True # Backtrack a_ =-1 return False def UpperCAmelCase_ ( lowercase__ , lowercase__ ): '''simple docstring''' a_ =[-1] * len(UpperCAmelCase__ ) if util_color(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , 0 ): return colored_vertices return []
710
'''simple docstring''' import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) lowercase = logging.getLogger() def UpperCAmelCase_ ( lowercase__ ): '''simple docstring''' a_ ={} a_ =os.path.join(lowercase__ , "all_results.json" ) if os.path.exists(lowercase__ ): with open(lowercase__ , "r" ) as f: a_ =json.load(lowercase__ ) else: raise ValueError(F"""can't find {path}""" ) return results lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class UpperCAmelCase ( __a): '''simple docstring''' def lowercase_ ( self) -> List[Any]: """simple docstring""" import xla_spawn a_ =self.get_auto_remove_tmp_dir() a_ =f""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_): a_ =time() xla_spawn.main() a_ =time() a_ =get_results(lowerCAmelCase_) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 5_0_0) def lowercase_ ( self) -> Tuple: """simple docstring""" import xla_spawn a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split() with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_): xla_spawn.main()
41
0
from __future__ import annotations from collections.abc import Callable _lowerCAmelCase : Optional[Any] = list[list[float | int]] def UpperCamelCase_( _snake_case : Matrix , _snake_case : Matrix ): """simple docstring""" __a =len(_snake_case ) __a =[[0 for _ in range(size + 1 )] for _ in range(_snake_case )] __a =42 __a =42 __a =42 __a =42 __a =42 __a =42 for row in range(_snake_case ): for col in range(_snake_case ): __a =matrix[row][col] __a =vector[row][0] __a =0 __a =0 while row < size and col < size: # pivoting __a =max((abs(augmented[rowa][col] ), rowa) for rowa in range(_snake_case , _snake_case ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __a , __a =augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _snake_case ): __a =augmented[rowa][col] / augmented[row][col] __a =0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _snake_case ): for row in range(_snake_case ): __a =augmented[row][col] / augmented[col][col] for cola in range(_snake_case , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_snake_case ) ] def UpperCamelCase_( _snake_case : list[int] ): """simple docstring""" __a =len(_snake_case ) __a =[[0 for _ in range(_snake_case )] for _ in range(_snake_case )] __a =[[0] for _ in range(_snake_case )] __a =42 __a =42 __a =42 __a =42 for x_val, y_val in enumerate(_snake_case ): for col in range(_snake_case ): __a =(x_val + 1) ** (size - col - 1) __a =y_val __a =solve(_snake_case , _snake_case ) def interpolated_func(_snake_case : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_snake_case ) ) return interpolated_func def UpperCamelCase_( _snake_case : int ): """simple docstring""" return ( 1 - variable + variable**2 - 
variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def UpperCamelCase_( _snake_case : Callable[[int], int] = question_function , _snake_case : int = 10 ): """simple docstring""" __a =[func(_snake_case ) for x_val in range(1 , order + 1 )] __a =[ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __a =0 __a =42 __a =42 for poly in polynomials: __a =1 while func(_snake_case ) == poly(_snake_case ): x_val += 1 ret += poly(_snake_case ) return ret if __name__ == "__main__": print(f'''{solution() = }''')
242
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration for BigBird models; defaults match google/bigbird-roberta-base."""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        # BigBird-specific sparse-attention settings.
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic axes of the ONNX graph inputs per task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
242
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ : Any = logging.get_logger(__name__) snake_case_ : Any = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class snake_case__ ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE__ = '''vivit''' def __init__( self : Any , lowercase : Optional[Any]=2_24 , lowercase : Union[str, Any]=32 , lowercase : Optional[int]=[2, 16, 16] , lowercase : Union[str, Any]=3 , lowercase : Any=7_68 , lowercase : Tuple=12 , lowercase : Tuple=12 , lowercase : Any=30_72 , lowercase : Union[str, Any]="gelu_fast" , lowercase : List[Any]=0.0 , lowercase : Any=0.0 , lowercase : Optional[int]=0.0_2 , lowercase : Optional[int]=1E-06 , lowercase : Optional[Any]=True , **lowercase : Any , ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Dict = hidden_act UpperCAmelCase : Union[str, Any] = hidden_dropout_prob UpperCAmelCase : List[str] = attention_probs_dropout_prob UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Any = layer_norm_eps UpperCAmelCase : List[Any] = image_size UpperCAmelCase : str = num_frames UpperCAmelCase : int = tubelet_size UpperCAmelCase : int = num_channels UpperCAmelCase : Any = qkv_bias super().__init__(**lowercase )
292
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class snake_case__ : def __init__( self : Any ): '''simple docstring''' UpperCAmelCase : List[Any] = {} def __lowerCAmelCase ( self : List[Any] , lowercase : str ): '''simple docstring''' UpperCAmelCase : Optional[int] = {} def __lowerCAmelCase ( self : Optional[int] , lowercase : str , lowercase : str , lowercase : float ): '''simple docstring''' if nodea not in self.connections: self.add_node(lowercase ) if nodea not in self.connections: self.add_node(lowercase ) UpperCAmelCase : int = probability def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return list(self.connections ) def __lowerCAmelCase ( self : int , lowercase : str ): '''simple docstring''' UpperCAmelCase : int = 0 UpperCAmelCase : Union[str, Any] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def lowercase_ ( _lowercase : str , _lowercase : list[tuple[str, str, float]] , _lowercase : int ): '''simple docstring''' UpperCAmelCase : int = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_lowercase , _lowercase , _lowercase ) UpperCAmelCase : List[Any] = Counter(graph.get_nodes() ) UpperCAmelCase : List[Any] = start for _ in range(_lowercase ): UpperCAmelCase : Optional[Any] = graph.transition(_lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
292
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType A : Optional[int] = logging.get_logger(__name__) A : Optional[Any] = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = '''deberta-v2''' def __init__(self : Optional[Any] , _UpperCAmelCase : Any=12_8100 , _UpperCAmelCase : List[Any]=1536 , _UpperCAmelCase : List[Any]=24 , _UpperCAmelCase : List[Any]=24 , _UpperCAmelCase : Optional[int]=6144 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : str=512 , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=1E-7 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Tuple=-1 , _UpperCAmelCase : Optional[int]=0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=0 , _UpperCAmelCase : Optional[int]="gelu" , **_UpperCAmelCase : Tuple , ) -> List[Any]: """simple docstring""" super().__init__(**_UpperCAmelCase ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings 
lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = relative_attention lowercase__ = max_relative_positions lowercase__ = pad_token_id lowercase__ = position_biased_input # Backwards compatibility if type(_UpperCAmelCase ) == str: lowercase__ = [x.strip() for x in pos_att_type.lower().split("""|""" )] lowercase__ = pos_att_type lowercase__ = vocab_size lowercase__ = layer_norm_eps lowercase__ = kwargs.get("""pooler_hidden_size""" , _UpperCAmelCase ) lowercase__ = pooler_dropout lowercase__ = pooler_hidden_act class A ( UpperCAmelCase__ ): '''simple docstring''' @property def lowerCamelCase__ (self : str ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def lowerCamelCase__ (self : int ) -> int: """simple docstring""" return 12 def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]: """simple docstring""" lowercase__ = super().generate_dummy_inputs(preprocessor=_UpperCAmelCase , framework=_UpperCAmelCase ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
15
"""simple docstring""" from __future__ import annotations import math def __a ( a, a ): """simple docstring""" _a = u for i in range(1, a ): _a = temp * (u - i) return temp def __a ( ): """simple docstring""" _a = int(input("enter the numbers of values: " ) ) _a = [] for _ in range(a ): y.append([] ) for i in range(a ): for j in range(a ): y[i].append(a ) _a = 0 print("enter the values of parameters in a list: " ) _a = list(map(a, input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(a ): _a = float(input() ) _a = int(input("enter the value to interpolate: " ) ) _a = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1, a ): for j in range(n - i ): _a = y[j + 1][i - 1] - y[j][i - 1] _a = y[0][0] for i in range(1, a ): summ += (ucal(a, a ) * y[0][i]) / math.factorial(a ) print(F'the value at {value} is {summ}' ) if __name__ == "__main__": main()
388
0
'''simple docstring''' import pytest __SCREAMING_SNAKE_CASE : Any = '__dummy_dataset1__' __SCREAMING_SNAKE_CASE : List[Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def _snake_case ( ) -> Tuple: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _snake_case ( ) -> Optional[Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _snake_case ( lowercase , lowercase , lowercase ) -> List[Any]: __a : Dict = dataset_loading_script_name __a : Any = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowercase ) __a : Union[str, Any] = script_dir / F"""{script_name}.py""" with open(lowercase , """w""" ) as f: f.write(lowercase ) return str(lowercase )
697
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = "" lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' super().__init__(self , **__UpperCamelCase ) __a : int = repo_info __a : int = token __a : Any = None def __lowerCamelCase ( self ): '''simple docstring''' if self.dir_cache is None: __a : Union[str, Any] = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __a : List[str] = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ): '''simple docstring''' if not isinstance(self.repo_info , __UpperCamelCase ): raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) __a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha ) return fsspec.open( __UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' self._get_dirs() __a : str = self._strip_protocol(__UpperCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , 
__UpperCamelCase=False , **__UpperCamelCase ): '''simple docstring''' self._get_dirs() __a : int = PurePosixPath(path.strip("""/""" ) ) __a : List[str] = {} for p, f in self.dir_cache.items(): __a : str = PurePosixPath(p.strip("""/""" ) ) __a : Optional[int] = p.parent if root == path: __a : List[str] = f __a : str = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
697
1
'''simple docstring''' from collections.abc import Sequence def a__ ( UpperCamelCase_ : Sequence[int] | None = None ): if nums is None or not nums: raise ValueError('''Input sequence should not be empty''' ) UpperCAmelCase__ :List[str] = nums[0] for i in range(1, len(UpperCamelCase_ ) ): UpperCAmelCase__ :str = nums[i] UpperCAmelCase__ :Tuple = max(UpperCamelCase_, ans + num, UpperCamelCase_ ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user __lowerCamelCase = int(input('''Enter number of elements : ''').strip()) __lowerCamelCase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n] print(max_subsequence_sum(array))
467
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def a__ ( UpperCamelCase_ : str, UpperCamelCase_ : str ): UpperCAmelCase__ :Any = list(UpperCamelCase_ ) UpperCAmelCase__ :Optional[int] = list(UpperCamelCase_ ) UpperCAmelCase__ :str = 0 for i in range(len(UpperCamelCase_ ) ): if lista[i] != lista[i]: count += 1 UpperCAmelCase__ :Union[str, Any] = '''_''' if count > 1: return False else: return "".join(UpperCamelCase_ ) def a__ ( UpperCamelCase_ : list[str] ): UpperCAmelCase__ :int = [] while True: UpperCAmelCase__ :Dict = ['''$'''] * len(UpperCamelCase_ ) UpperCAmelCase__ :List[str] = [] for i in range(len(UpperCamelCase_ ) ): for j in range(i + 1, len(UpperCamelCase_ ) ): UpperCAmelCase__ :Optional[Any] = compare_string(binary[i], binary[j] ) if k is False: UpperCAmelCase__ :Any = '''*''' UpperCAmelCase__ :List[Any] = '''*''' temp.append('''X''' ) for i in range(len(UpperCamelCase_ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(UpperCamelCase_ ) == 0: return pi UpperCAmelCase__ :Tuple = list(set(UpperCamelCase_ ) ) def a__ ( UpperCamelCase_ : int, UpperCamelCase_ : Sequence[float] ): UpperCAmelCase__ :int = [] for minterm in minterms: UpperCAmelCase__ :int = '''''' for _ in range(UpperCamelCase_ ): UpperCAmelCase__ :Optional[int] = str(minterm % 2 ) + string minterm //= 2 temp.append(UpperCamelCase_ ) return temp def a__ ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : int ): UpperCAmelCase__ :Dict = list(UpperCamelCase_ ) UpperCAmelCase__ :str = list(UpperCamelCase_ ) UpperCAmelCase__ :str = 0 for i in range(len(UpperCamelCase_ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def a__ ( UpperCamelCase_ : list[list[int]], UpperCamelCase_ : list[str] ): UpperCAmelCase__ :Optional[Any] = [] UpperCAmelCase__ :List[Any] = [0] * len(UpperCamelCase_ ) for i in range(len(chart[0] ) ): UpperCAmelCase__ :Optional[Any] = 0 UpperCAmelCase__ :Union[str, Any] = -1 
for j in range(len(UpperCamelCase_ ) ): if chart[j][i] == 1: count += 1 UpperCAmelCase__ :Any = j if count == 1: UpperCAmelCase__ :Any = 1 for i in range(len(UpperCamelCase_ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(UpperCamelCase_ ) ): UpperCAmelCase__ :int = 0 temp.append(prime_implicants[i] ) while True: UpperCAmelCase__ :Optional[int] = 0 UpperCAmelCase__ :Dict = -1 UpperCAmelCase__ :Optional[Any] = 0 for i in range(len(UpperCamelCase_ ) ): UpperCAmelCase__ :str = chart[i].count(1 ) if count_n > max_n: UpperCAmelCase__ :Any = count_n UpperCAmelCase__ :List[Any] = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(UpperCamelCase_ ) ): UpperCAmelCase__ :Optional[int] = 0 def a__ ( UpperCamelCase_ : list[str], UpperCamelCase_ : list[str] ): UpperCAmelCase__ :List[str] = [[0 for x in range(len(UpperCamelCase_ ) )] for x in range(len(UpperCamelCase_ ) )] for i in range(len(UpperCamelCase_ ) ): UpperCAmelCase__ :Tuple = prime_implicants[i].count('''_''' ) for j in range(len(UpperCamelCase_ ) ): if is_for_table(prime_implicants[i], binary[j], UpperCamelCase_ ): UpperCAmelCase__ :List[str] = 1 return chart def a__ ( ): UpperCAmelCase__ :int = int(input('''Enter the no. 
of variables\n''' ) ) UpperCAmelCase__ :Tuple = [ float(UpperCamelCase_ ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] UpperCAmelCase__ :Union[str, Any] = decimal_to_binary(UpperCamelCase_, UpperCamelCase_ ) UpperCAmelCase__ :Optional[Any] = check(UpperCamelCase_ ) print('''Prime Implicants are:''' ) print(UpperCamelCase_ ) UpperCAmelCase__ :Optional[int] = prime_implicant_chart(UpperCamelCase_, UpperCamelCase_ ) UpperCAmelCase__ :Dict = selection(UpperCamelCase_, UpperCamelCase_ ) print('''Essential Prime Implicants are:''' ) print(UpperCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
467
1
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A =logging.get_logger(__name__) A ={ 'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json', 'Salesforce/blip-vqa-capfit-large': ( 'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-base': ( 'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-large': ( 'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json' ), 'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json', 'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json', 'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json', 'Salesforce/blip-itm-large-flikr': ( 'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json' ), } class _a ( __a ): __a : Dict = """blip_text_model""" def __init__( self : Union[str, Any] , lowercase : Optional[Any]=30_524 , lowercase : Optional[int]=768 , lowercase : Any=768 , lowercase : Optional[int]=3_072 , lowercase : int=768 , lowercase : Any=12 , lowercase : Dict=8 , lowercase : List[Any]=512 , lowercase : Any="gelu" , lowercase : Tuple=1E-12 , lowercase : str=0.0 , lowercase : Optional[Any]=0.0 , lowercase : int=0.02 , lowercase : Dict=30_522 , lowercase : Union[str, Any]=2 , lowercase : Optional[int]=0 , lowercase : Optional[int]=102 , lowercase : List[Any]=True , lowercase : Optional[Any]=True , **lowercase : Any , ): '''simple docstring''' super().__init__( pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , sep_token_id=lowercase , **lowercase , ) UpperCAmelCase = vocab_size UpperCAmelCase = 
hidden_size UpperCAmelCase = encoder_hidden_size UpperCAmelCase = intermediate_size UpperCAmelCase = projection_dim UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = max_position_embeddings UpperCAmelCase = layer_norm_eps UpperCAmelCase = hidden_act UpperCAmelCase = initializer_range UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = is_decoder UpperCAmelCase = use_cache @classmethod def A ( cls : int , lowercase : Union[str, os.PathLike] , **lowercase : List[str] ): '''simple docstring''' cls._set_token_in_kwargs(lowercase ) UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(lowercase , **lowercase ) # get the text config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": UpperCAmelCase = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(lowercase , **lowercase ) class _a ( __a ): __a : List[Any] = """blip_vision_model""" def __init__( self : Any , lowercase : int=768 , lowercase : Optional[int]=3_072 , lowercase : List[Any]=512 , lowercase : Optional[Any]=12 , lowercase : Optional[Any]=12 , lowercase : Union[str, Any]=384 , lowercase : Dict=16 , lowercase : Optional[int]="gelu" , lowercase : Union[str, Any]=1E-5 , lowercase : str=0.0 , lowercase : str=1E-10 , **lowercase : Tuple , ): '''simple docstring''' super().__init__(**lowercase ) UpperCAmelCase = hidden_size UpperCAmelCase = intermediate_size UpperCAmelCase = projection_dim UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = patch_size UpperCAmelCase = image_size UpperCAmelCase = initializer_range UpperCAmelCase = attention_dropout UpperCAmelCase = layer_norm_eps UpperCAmelCase = hidden_act @classmethod def A ( cls : Tuple , lowercase : Union[str, os.PathLike] , **lowercase : str ): '''simple docstring''' cls._set_token_in_kwargs(lowercase ) UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(lowercase , **lowercase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": UpperCAmelCase = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(lowercase , **lowercase ) class _a ( __a ): __a : Tuple = """blip""" __a : str = True def __init__( self : Union[str, Any] , lowercase : Tuple=None , lowercase : Any=None , lowercase : str=512 , lowercase : str=2.6592 , lowercase : Union[str, Any]=256 , **lowercase : Union[str, Any] , ): '''simple docstring''' super().__init__(**lowercase ) if text_config is None: UpperCAmelCase = {} logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' ) if vision_config is None: UpperCAmelCase = {} logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' ) UpperCAmelCase = BlipTextConfig(**lowercase ) UpperCAmelCase = BlipVisionConfig(**lowercase ) UpperCAmelCase = self.vision_config.hidden_size UpperCAmelCase = projection_dim UpperCAmelCase = logit_scale_init_value UpperCAmelCase = 1.0 UpperCAmelCase = 0.02 UpperCAmelCase = image_text_hidden_size @classmethod def A ( cls : Optional[int] , lowercase : BlipTextConfig , lowercase : BlipVisionConfig , **lowercase : List[str] ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase ) def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = copy.deepcopy(self.__dict__ ) UpperCAmelCase = self.text_config.to_dict() UpperCAmelCase = self.vision_config.to_dict() UpperCAmelCase = self.__class__.model_type return output
358
'''simple docstring''' import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _a : def __init__( self : List[str] , lowercase : Dict , lowercase : List[Any]=13 , lowercase : Optional[Any]=7 , lowercase : Any=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : str=True , lowercase : List[str]=99 , lowercase : int=64 , lowercase : List[Any]=32 , lowercase : str=5 , lowercase : Optional[int]=4 , lowercase : int=37 , lowercase : str="gelu" , lowercase : Any=0.1 , lowercase : Optional[Any]=0.1 , lowercase : Optional[int]=512 , lowercase : Union[str, Any]=16 , lowercase : List[str]=2 , lowercase : Tuple=0.02 , lowercase : List[Any]=3 , lowercase : int=4 , lowercase : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = embedding_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = 
max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Optional[Any] ): '''simple docstring''' return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def A ( self : Tuple , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : str , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = MobileBertModel(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = 
model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) UpperCAmelCase = model(lowercase , token_type_ids=lowercase ) UpperCAmelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : Dict , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : List[str] , lowercase : Dict , lowercase : List[str] , lowercase : Optional[int] , lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = MobileBertForMaskedLM(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Dict , lowercase : Any , lowercase : Dict , lowercase : Optional[Any] , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : List[Any] , lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = MobileBertForNextSentencePrediction(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : Optional[int] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : int , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Tuple ): '''simple docstring''' UpperCAmelCase = MobileBertForPreTraining(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : str , lowercase : Optional[Any] , lowercase : List[str] , lowercase : int , lowercase : Any ): '''simple docstring''' UpperCAmelCase = MobileBertForQuestionAnswering(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[Any] , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : List[str] , lowercase : List[str] , lowercase : str , lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.num_labels UpperCAmelCase = MobileBertForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : List[Any] , lowercase : int , lowercase : List[Any] , lowercase : Tuple , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[str] , lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.num_labels UpperCAmelCase = MobileBertForTokenClassification(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : int , lowercase : Optional[int] , lowercase : Tuple , lowercase : List[Any] , lowercase : Tuple , lowercase : str , lowercase : Tuple , lowercase : Dict ): '''simple 
docstring''' UpperCAmelCase = self.num_choices UpperCAmelCase = MobileBertForMultipleChoice(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _a ( __a , __a , unittest.TestCase ): __a : Dict = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) __a : List[str] = ( { """feature-extraction""": MobileBertModel, """fill-mask""": MobileBertForMaskedLM, """question-answering""": MobileBertForQuestionAnswering, """text-classification""": MobileBertForSequenceClassification, """token-classification""": MobileBertForTokenClassification, """zero-shot""": MobileBertForSequenceClassification, } if is_torch_available() else {} ) __a : Any = True def A ( self : List[Any] , lowercase : int , lowercase : Any , lowercase : Optional[Any]=False ): '''simple docstring''' UpperCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) if 
return_labels: if model_class in get_values(lowercase ): UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase ) UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase ) return inputs_dict def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = MobileBertModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def A ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase ) def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase ) def snake_case_ (_a : List[Any] ): return torch.tensor( _a , dtype=torch.long , device=_a , ) A =1E-3 @require_torch @require_sentencepiece @require_tokenizers class _a ( unittest.TestCase ): @slow def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(lowercase ) UpperCAmelCase = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] ) with torch.no_grad(): UpperCAmelCase = model(lowercase )[0] UpperCAmelCase = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , lowercase ) UpperCAmelCase = torch.tensor( [ [ [-2.4_736_526E07, 8.2_691_656E04, 1.6_521_838E05], [-5.7_541_704E-01, 3.9_056_022E00, 4.4_011_507E00], [2.6_047_359E00, 1.5_677_652E00, -1.7_324_188E-01], ] ] , device=lowercase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
358
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase ( lowerCamelCase_ ): lowercase = '''beit''' def __init__(self : Tuple ,SCREAMING_SNAKE_CASE_ : Dict=8_192 ,SCREAMING_SNAKE_CASE_ : Union[str, Any]=768 ,SCREAMING_SNAKE_CASE_ : Any=12 ,SCREAMING_SNAKE_CASE_ : Optional[int]=12 ,SCREAMING_SNAKE_CASE_ : Optional[Any]=3_072 ,SCREAMING_SNAKE_CASE_ : Tuple="gelu" ,SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 ,SCREAMING_SNAKE_CASE_ : List[str]=0.0 ,SCREAMING_SNAKE_CASE_ : str=0.02 ,SCREAMING_SNAKE_CASE_ : List[Any]=1e-12 ,SCREAMING_SNAKE_CASE_ : str=224 ,SCREAMING_SNAKE_CASE_ : List[Any]=16 ,SCREAMING_SNAKE_CASE_ : Tuple=3 ,SCREAMING_SNAKE_CASE_ : Optional[Any]=False ,SCREAMING_SNAKE_CASE_ : Tuple=False ,SCREAMING_SNAKE_CASE_ : str=False ,SCREAMING_SNAKE_CASE_ : Optional[int]=False ,SCREAMING_SNAKE_CASE_ : List[str]=0.1 ,SCREAMING_SNAKE_CASE_ : List[str]=0.1 ,SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE_ : Tuple=[3, 5, 7, 11] ,SCREAMING_SNAKE_CASE_ : Tuple=[1, 2, 3, 6] ,SCREAMING_SNAKE_CASE_ : str=True ,SCREAMING_SNAKE_CASE_ : Optional[int]=0.4 ,SCREAMING_SNAKE_CASE_ : List[Any]=256 ,SCREAMING_SNAKE_CASE_ : Any=1 ,SCREAMING_SNAKE_CASE_ : List[str]=False ,SCREAMING_SNAKE_CASE_ : Optional[int]=255 ,**SCREAMING_SNAKE_CASE_ : int ,) -> List[str]: """simple docstring""" super().__init__(**__snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act 
lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = use_mask_token lowerCAmelCase = use_absolute_position_embeddings lowerCAmelCase = use_relative_position_bias lowerCAmelCase = use_shared_relative_position_bias lowerCAmelCase = layer_scale_init_value lowerCAmelCase = drop_path_rate lowerCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) lowerCAmelCase = out_indices lowerCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) lowerCAmelCase = use_auxiliary_head lowerCAmelCase = auxiliary_loss_weight lowerCAmelCase = auxiliary_channels lowerCAmelCase = auxiliary_num_convs lowerCAmelCase = auxiliary_concat_input lowerCAmelCase = semantic_loss_ignore_index class lowercase ( lowerCamelCase_ ): lowercase = version.parse('''1.11''' ) @property def UpperCAmelCase (self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase (self : Optional[int] ) -> float: """simple docstring""" return 1e-4
535
import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor snake_case : Union[str, Any] = logging.get_logger(__name__) class snake_case_ (lowerCamelCase_ ): def __init__( self :Any ,*__snake_case :str ,**__snake_case :int ) -> None: warnings.warn( 'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PerceiverImageProcessor instead.' ,__snake_case ,) super().__init__(*__snake_case ,**__snake_case )
335
0
import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase__ = pd.read_csv( '''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/''' '''position_salaries.csv''' ) lowerCAmelCase__ = dataset.iloc[:, 1:2].values lowerCAmelCase__ = dataset.iloc[:, 2].values lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase__ = PolynomialFeatures(degree=4) lowerCAmelCase__ = poly_reg.fit_transform(X) lowerCAmelCase__ = LinearRegression() pol_reg.fit(X_poly, y) def __lowerCamelCase ( ): """simple docstring""" plt.scatter(lowerCamelCase__ , lowerCamelCase__ , color="red" ) plt.plot(lowerCamelCase__ , pol_reg.predict(poly_reg.fit_transform(lowerCamelCase__ ) ) , color="blue" ) plt.title("Truth or Bluff (Linear Regression)" ) plt.xlabel("Position level" ) plt.ylabel("Salary" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
707
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    """Tests that CLIPProcessor behaves identically to its wrapped tokenizer and
    image processor, and that save/load round-trips preserve both components."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (converted from a CHW uint8 array)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
81
0
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    """Dataclass helper: a list-valued field with a mutable-safe default."""
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    """Command-line arguments controlling what is plotted and how."""

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    """True if *string* parses as an int."""
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    """True if *string* parses as a float."""
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    """Reads a benchmark csv and plots result vs. batch size or sequence length."""

    def __init__(self, args):
        self.args = args
        # per model: observed batch sizes, sequence lengths, and a
        # {(batch_size, seq_len): result} mapping
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                # only keep x values for which a result exists
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]  # strip trailing " vs."
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
696
from ...configuration_utils import PretrainedConfig class UpperCamelCase ( snake_case__ ): __UpperCamelCase = """bert-generation""" def __init__( self : Tuple ,_lowerCAmelCase : Union[str, Any]=50_358 ,_lowerCAmelCase : List[Any]=1_024 ,_lowerCAmelCase : str=24 ,_lowerCAmelCase : Any=16 ,_lowerCAmelCase : Any=4_096 ,_lowerCAmelCase : Any="gelu" ,_lowerCAmelCase : Optional[Any]=0.1 ,_lowerCAmelCase : Optional[Any]=0.1 ,_lowerCAmelCase : Optional[Any]=512 ,_lowerCAmelCase : Optional[Any]=0.0_2 ,_lowerCAmelCase : Union[str, Any]=1E-12 ,_lowerCAmelCase : Optional[int]=0 ,_lowerCAmelCase : Optional[int]=2 ,_lowerCAmelCase : Optional[Any]=1 ,_lowerCAmelCase : Any="absolute" ,_lowerCAmelCase : str=True ,**_lowerCAmelCase : List[Any] ,): """simple docstring""" super().__init__(pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase ) __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = intermediate_size __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = position_embedding_type __snake_case = use_cache
524
0
'''simple docstring''' def UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : Any = 0 while num > 0: digit_sum += num % 1_0 num //= 1_0 return digit_sum def UpperCAmelCase_ ( lowerCamelCase_ = 1_0_0 ): """simple docstring""" lowerCAmelCase__ : Tuple = 1 lowerCAmelCase__ : Union[str, Any] = 2 for i in range(2 , max_n + 1 ): lowerCAmelCase__ : List[str] = pre_numerator lowerCAmelCase__ : Any = 2 * i // 3 if i % 3 == 0 else 1 lowerCAmelCase__ : Any = cur_numerator lowerCAmelCase__ : Union[str, Any] = e_cont * pre_numerator + temp return sum_digits(lowerCamelCase_ ) if __name__ == "__main__": print(f'{solution() = }')
568
'''simple docstring''' def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError("both inputs must be positive integers" ) lowerCAmelCase__ : Any = str(bin(lowerCamelCase_ ) ) binary_number += "0" * shift_amount return binary_number def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if number < 0 or shift_amount < 0: raise ValueError("both inputs must be positive integers" ) lowerCAmelCase__ : Any = str(bin(lowerCamelCase_ ) )[2:] if shift_amount >= len(lowerCamelCase_ ): return "0b0" lowerCAmelCase__ : Optional[int] = binary_number[: len(lowerCamelCase_ ) - shift_amount] return "0b" + shifted_binary_number def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if number >= 0: # Get binary representation of positive number lowerCAmelCase__ : Any = "0" + str(bin(lowerCamelCase_ ) ).strip("-" )[2:] else: # Get binary (2's complement) representation of negative number lowerCAmelCase__ : List[Any] = len(bin(lowerCamelCase_ )[3:] ) # Find 2's complement of number lowerCAmelCase__ : int = bin(abs(lowerCamelCase_ ) - (1 << binary_number_length) )[3:] lowerCAmelCase__ : Union[str, Any] = ( "1" + "0" * (binary_number_length - len(lowerCamelCase_ )) + binary_number ) if shift_amount >= len(lowerCamelCase_ ): return "0b" + binary_number[0] * len(lowerCamelCase_ ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(lowerCamelCase_ ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
568
1
import functools from typing import Any def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> bool: # Validation if not isinstance(__snake_case , __snake_case ) or len(__snake_case ) == 0: raise ValueError("""the string should be not empty string""" ) if not isinstance(__snake_case , __snake_case ) or not all( isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0 for item in words ): raise ValueError("""the words should be a list of non-empty strings""" ) # Build trie _UpperCAmelCase = {} _UpperCAmelCase = """WORD_KEEPER""" for word in words: _UpperCAmelCase = trie for c in word: if c not in trie_node: _UpperCAmelCase = {} _UpperCAmelCase = trie_node[c] _UpperCAmelCase = True _UpperCAmelCase = len(__snake_case ) # Dynamic programming method @functools.cache def is_breakable(__snake_case ) -> bool: if index == len_string: return True _UpperCAmelCase = trie for i in range(__snake_case , __snake_case ): _UpperCAmelCase = trie_node.get(string[i] , __snake_case ) if trie_node is None: return False if trie_node.get(__snake_case , __snake_case ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
108
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    """Configuration class for an EfficientNet model.

    Stores the per-stage block hyper-parameters (kernel sizes, channels, strides,
    repeats, expansion ratios) plus the compound-scaling coefficients. Extra
    keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # each MBConv block expands to 4 hidden layers
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet: single 4-D pixel input."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
108
1
'''simple docstring''' def lowerCamelCase__ ( a ): if not isinstance(a , a ): raise TypeError('Input value must be an \'int\' type' ) __snake_case = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
427
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers _lowercase = [ """python""", """tqdm""", """regex""", """requests""", """packaging""", """filelock""", """numpy""", """tokenizers""", """huggingface-hub""", """safetensors""", """accelerate""", """pyyaml""", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def lowerCamelCase__ ( a , a=None ): require_version(deps[pkg] , a )
427
1
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCamelCase : Optional[int] = False class A( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : str ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self : str ) -> str: """simple docstring""" lowerCamelCase_ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) lowerCamelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pipe.dual_guided( prompt='first prompt' , image=A_ , text_to_image_strength=0.75 , generator=A_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A_ ) lowerCamelCase_ = VersatileDiffusionPipeline.from_pretrained(A_ , torch_dtype=torch.floataa ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) lowerCamelCase_ = generator.manual_seed(0 ) lowerCamelCase_ = pipe.dual_guided( prompt='first prompt' , image=A_ , text_to_image_strength=0.75 , generator=A_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def a__ ( self : str ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) lowerCamelCase_ = 'cyberpunk 2077' lowerCamelCase_ = load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pipe.dual_guided( prompt=A_ , image=A_ , text_to_image_strength=0.75 , generator=A_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images lowerCamelCase_ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowerCamelCase_ = 'A painting of a squirrel eating a burger ' lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pipe.text_to_image( prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images lowerCamelCase_ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowerCamelCase_ = pipe.image_variation(A_ , generator=A_ , output_type='numpy' ).images lowerCamelCase_ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
70
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys lowerCamelCase : List[Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") lowerCamelCase : Tuple = ( subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("utf-8").split() ) lowerCamelCase : Tuple = "|".join(sys.argv[1:]) lowerCamelCase : Any = re.compile(rF"""^({joined_dirs}).*?\.py$""") lowerCamelCase : List[str] = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
70
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : Optional[int] = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Any = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
715
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem UpperCamelCase : Dict = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 UpperCamelCase : List[compression.BaseCompressedFileFileSystem] = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def UpperCamelCase_ ( __a ) -> str: if "://" in dataset_path: a__ : Any = dataset_path.split("://" )[1] return dataset_path def UpperCamelCase_ ( __a ) -> bool: if fs is not None and fs.protocol != "file": return True else: return False def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = not is_remote_filesystem(__a ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__a ) , fs._strip_protocol(__a ) ) else: fs.mv(__a , __a , recursive=__a ) def UpperCamelCase_ ( ) -> None: if hasattr(fsspec.asyn , "reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: a__ : Tuple = None a__ : int = None a__ : int = threading.Lock()
151
0
"""simple docstring""" import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase_ ( UpperCAmelCase_ ) ->Any: """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Dict , __lowercase : List[Any] , __lowercase : Optional[int] ): '''simple docstring''' super().__init__() __UpperCAmelCase : List[Any] = module __UpperCAmelCase : List[str] = nn.Sequential( nn.Linear(module.in_features , snake_case_ , bias=snake_case_ ) , nn.Linear(snake_case_ , module.out_features , bias=snake_case_ ) , ) __UpperCAmelCase : Union[str, Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=snake_case_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def A_ ( self : Optional[Any] , __lowercase : Tuple , *__lowercase : Dict , **__lowercase : List[Any] ): '''simple docstring''' return self.module(snake_case_ , *snake_case_ , **snake_case_ ) + self.adapter(snake_case_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class snake_case ( unittest.TestCase ): '''simple docstring''' _A : Dict = 'bigscience/bloom-1b7' # Constant values _A : List[str] = 2.1_09_65_95_52_69_25_74 _A : Union[str, Any] = 'Hello my name is' _A : Union[str, Any] = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _A : Any = 10 def A_ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = AutoTokenizer.from_pretrained(self.model_name ) class snake_case ( UpperCamelCase__ ): '''simple docstring''' def A_ ( self : Optional[Any] ): '''simple docstring''' super().setUp() # Models and tokenizer __UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map='''auto''' ) def A_ ( self : Optional[Any] ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def A_ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : int = self.model_abit.config self.assertTrue(hasattr(snake_case_ , '''quantization_config''' ) ) __UpperCAmelCase : Optional[int] = config.to_dict() __UpperCAmelCase : Any = config.to_diff_dict() __UpperCAmelCase : List[Any] = config.to_json_string() def A_ ( self : Dict ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __UpperCAmelCase : Tuple = self.model_fpaa.get_memory_footprint() __UpperCAmelCase : Tuple = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __UpperCAmelCase : Union[str, Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def A_ ( self : Tuple ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(snake_case_ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are 
packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def A_ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.tokenizer(self.input_text , return_tensors='''pt''' ) __UpperCAmelCase : int = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS ) def A_ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Dict = BitsAndBytesConfig() __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=snake_case_ , device_map='''auto''' ) __UpperCAmelCase : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' ) __UpperCAmelCase : Optional[int] = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS ) def A_ ( self : str ): '''simple docstring''' with self.assertRaises(snake_case_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(snake_case_ ) def A_ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = BitsAndBytesConfig() with self.assertRaises(snake_case_ ): __UpperCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=snake_case_ , load_in_abit=snake_case_ , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def A_ ( self : Dict ): '''simple docstring''' with self.assertRaises(snake_case_ ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(snake_case_ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(snake_case_ ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(snake_case_ ): # Tries with a 
`device` self.model_abit.float() with self.assertRaises(snake_case_ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ) __UpperCAmelCase : Tuple = self.model_fpaa.to(torch.floataa ) __UpperCAmelCase : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __UpperCAmelCase : str = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __UpperCAmelCase : Dict = self.model_fpaa.half() # Check this does not throw an error __UpperCAmelCase : int = self.model_fpaa.float() def A_ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=snake_case_ , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class snake_case ( unittest.TestCase ): '''simple docstring''' @classmethod def A_ ( cls : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = '''t5-small''' __UpperCAmelCase : Any = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(cls.model_name ) __UpperCAmelCase : Dict = '''Translate in German: Hello, my dog is cute''' def A_ ( self : Any ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def A_ ( self : Optional[Any] ): '''simple docstring''' from transformers import TaForConditionalGeneration __UpperCAmelCase : str = TaForConditionalGeneration._keep_in_fpaa_modules __UpperCAmelCase : Optional[int] = None # test with `t5-small` __UpperCAmelCase : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map='''auto''' ) __UpperCAmelCase : int = 
self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __UpperCAmelCase : List[Any] = model.generate(**snake_case_ ) # test with `flan-t5-small` __UpperCAmelCase : Optional[Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=snake_case_ , device_map='''auto''' ) __UpperCAmelCase : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __UpperCAmelCase : Optional[Any] = model.generate(**snake_case_ ) __UpperCAmelCase : List[Any] = modules def A_ ( self : List[str] ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __UpperCAmelCase : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __UpperCAmelCase : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __UpperCAmelCase : Optional[int] = model.generate(**snake_case_ ) # test with `flan-t5-small` __UpperCAmelCase : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=snake_case_ , device_map='''auto''' ) __UpperCAmelCase : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __UpperCAmelCase : List[Any] = model.generate(**snake_case_ ) class snake_case ( UpperCamelCase__ ): '''simple docstring''' def A_ ( self : Any ): '''simple docstring''' super().setUp() # model_name __UpperCAmelCase : Optional[int] = '''bigscience/bloom-560m''' __UpperCAmelCase : Optional[int] = '''t5-small''' # Different types of model __UpperCAmelCase : Any = AutoModel.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map='''auto''' ) # Sequence classification model __UpperCAmelCase : int = AutoModelForSequenceClassification.from_pretrained( self.model_name , 
load_in_abit=snake_case_ , device_map='''auto''' ) # CausalLM model __UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map='''auto''' ) # Seq2seq model __UpperCAmelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=snake_case_ , device_map='''auto''' ) def A_ ( self : Dict ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def A_ ( self : Optional[Any] ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class snake_case ( UpperCamelCase__ ): '''simple docstring''' def A_ ( self : int ): '''simple docstring''' super().setUp() def A_ ( self : List[Any] ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def A_ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __UpperCAmelCase : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class snake_case ( UpperCamelCase__ ): '''simple docstring''' def A_ ( self : Any ): '''simple docstring''' super().setUp() def A_ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained( self.model_name , 
load_in_abit=snake_case_ , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __UpperCAmelCase : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __UpperCAmelCase : Optional[Any] = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS ) class snake_case ( UpperCamelCase__ ): '''simple docstring''' def A_ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = '''facebook/opt-350m''' super().setUp() def A_ ( self : Dict ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __UpperCAmelCase : Union[str, Any] = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __UpperCAmelCase : Union[str, Any] = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(snake_case_ ) ): __UpperCAmelCase : Tuple = LoRALayer(module.q_proj , rank=16 ) __UpperCAmelCase : int = LoRALayer(module.k_proj , rank=16 ) __UpperCAmelCase : List[str] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __UpperCAmelCase : int = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __UpperCAmelCase : Union[str, Any] = model.forward(**snake_case_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(snake_case_ , snake_case_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(snake_case_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class snake_case ( UpperCamelCase__ ): '''simple docstring''' _A : int = 'gpt2-xl' _A : Union[str, Any] = 3.31_91_85_48_54_15_21_87
522
def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" def count_of_possible_combinations(__a ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__a ) def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( __a , __a ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] SCREAMING_SNAKE_CASE : List[Any] =sum( count_of_possible_combinations_with_dp_array(target - item , __a ) for item in array ) SCREAMING_SNAKE_CASE : List[str] =answer return answer SCREAMING_SNAKE_CASE : int =[-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] =[0] * (target + 1) SCREAMING_SNAKE_CASE : Optional[Any] =1 for i in range(1 , target + 1 ): for j in range(__a ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() _A = 3 _A = 5 _A = [1, 2, 5] print(combination_sum_iv(n, array, target))
258
0
def A ( lowercase__ : List[str]=2_8123 ) -> Union[str, Any]: UpperCamelCase__ :Optional[Any] = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i UpperCamelCase__ :Optional[int] = set() UpperCamelCase__ :Optional[Any] = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(lowercase__ ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
383
import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training") # TF training parameters UpperCamelCase = False UpperCamelCase = False def A ( lowercase__ : Namespace ) -> Dict: return TrainCommand(lowercase__ ) class lowerCAmelCase_ ( lowercase ): """simple docstring""" @staticmethod def __a ( lowerCamelCase__ :ArgumentParser ): UpperCamelCase__ :int = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" ) train_parser.add_argument( """--train_data""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , ) train_parser.add_argument( """--column_label""" , type=lowerCamelCase__ , default=0 , help="""Column of the dataset csv file with example labels.""" ) train_parser.add_argument( """--column_text""" , type=lowerCamelCase__ , default=1 , help="""Column of the dataset csv file with example texts.""" ) train_parser.add_argument( """--column_id""" , type=lowerCamelCase__ , default=2 , help="""Column of the dataset csv file with example ids.""" ) train_parser.add_argument( """--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" ) train_parser.add_argument("""--validation_data""" , type=lowerCamelCase__ , default="""""" , help="""path to validation dataset.""" ) train_parser.add_argument( """--validation_split""" , type=lowerCamelCase__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , ) train_parser.add_argument("""--output""" , 
type=lowerCamelCase__ , default="""./""" , help="""path to saved the trained model.""" ) train_parser.add_argument( """--task""" , type=lowerCamelCase__ , default="""text_classification""" , help="""Task to train the model on.""" ) train_parser.add_argument( """--model""" , type=lowerCamelCase__ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" ) train_parser.add_argument("""--train_batch_size""" , type=lowerCamelCase__ , default=32 , help="""Batch size for training.""" ) train_parser.add_argument("""--valid_batch_size""" , type=lowerCamelCase__ , default=64 , help="""Batch size for validation.""" ) train_parser.add_argument("""--learning_rate""" , type=lowerCamelCase__ , default=3e-5 , help="""Learning rate.""" ) train_parser.add_argument("""--adam_epsilon""" , type=lowerCamelCase__ , default=1e-08 , help="""Epsilon for Adam optimizer.""" ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self :int , lowerCamelCase__ :Namespace ): UpperCamelCase__ :List[Any] = logging.get_logger("""transformers-cli/training""" ) UpperCamelCase__ :Optional[Any] = """tf""" if is_tf_available() else """torch""" os.makedirs(args.output , exist_ok=lowerCamelCase__ ) UpperCamelCase__ :int = args.output UpperCamelCase__ :Optional[Any] = args.column_label UpperCamelCase__ :Any = args.column_text UpperCamelCase__ :Tuple = args.column_id self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" ) if args.task == "text_classification": UpperCamelCase__ :int = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f"""Loading dataset from {args.train_data}""" ) UpperCamelCase__ :Any = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) UpperCamelCase__ 
:List[Any] = None if args.validation_data: self.logger.info(f"""Loading validation dataset from {args.validation_data}""" ) UpperCamelCase__ :List[str] = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) UpperCamelCase__ :Optional[int] = args.validation_split UpperCamelCase__ :int = args.train_batch_size UpperCamelCase__ :str = args.valid_batch_size UpperCamelCase__ :Any = args.learning_rate UpperCamelCase__ :List[str] = args.adam_epsilon def __a ( self :Optional[int] ): if self.framework == "tf": return self.run_tf() return self.run_torch() def __a ( self :List[Any] ): raise NotImplementedError def __a ( self :Dict ): self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
383
1
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    """Return the surface area 6*a**2 of a cube with edge ``side_length``.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Return the surface area 2(lb + bh + lh) of a rectangular cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Return the surface area 4*pi*r**2 of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Return the total surface area 3*pi*r**2 of a solid hemisphere.

    Curved part contributes 2*pi*r**2 and the flat base pi*r**2.
    """
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Return the total surface area pi*r*(r + l) of a right circular cone.

    ``l`` is the slant height sqrt(h**2 + r**2).
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Return the total surface area of a conical frustum with parallel radii r1, r2."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Return the total surface area 2*pi*r*(h + r) of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Return the surface area 4*pi**2*R*r of a ring torus (R >= r)."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    # Fixed: original referenced an undefined name where pi belongs.
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Return the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Return the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Return the area b*h/2 of a triangle given base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Return the area of a triangle from its three sides via Heron's formula."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Return the area b*h of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Return the area (b1 + b2)*h/2 of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Return the area pi*r**2 of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Return the area pi*a*b of an ellipse with semi-axes a, b."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Return the area d1*d2/2 of a rhombus given its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Return the area of a regular polygon with ``sides`` sides of ``length`` each."""
    # Fixed: original tested isinstance against an undefined name and had an
    # unreachable duplicated return statement.
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \nequal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \nlength of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
339
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class lowercase(ProcessorMixin):
    """Processor wrapping an OwlViT image processor and a CLIP tokenizer.

    Batches text queries (padding every sample to the max query count),
    preprocesses target and query images, and bundles everything into a
    single ``BatchEncoding``.
    """

    # Fixed: the three class attributes had collided into one name; ProcessorMixin
    # requires `attributes`, `image_processor_class` and `tokenizer_class`.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Back-compat shim: accept the deprecated `feature_extractor` kwarg.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize `text` and/or preprocess `images`/`query_images`.

        Returns a BatchEncoding with `input_ids`/`attention_mask` when text is
        given, `query_pixel_values` for query images and `pixel_values` for
        target images.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            # A single string or a flat list of strings is one sample.
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Stack the per-sample encodings into one batch in the requested framework.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
307
0
from collections.abc import Sequence def a_ ( _A , _A ) -> float: """simple docstring""" return sum(c * (x**i) for i, c in enumerate(_A ) ) def a_ ( _A , _A ) -> float: """simple docstring""" snake_case__ = 0.0 for coeff in reversed(_A ): snake_case__ = result * x + coeff return result if __name__ == "__main__": __UpperCamelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) __UpperCamelCase : Dict = 1_0.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
372
from __future__ import annotations def a_ ( _A ) -> bool: """simple docstring""" return len(set(_A ) ) == len(_A ) if __name__ == "__main__": import doctest doctest.testmod()
372
1
'''simple docstring'''
from jiwer import compute_measures

import datasets


# Fixed: the three doc constants had collided into one name; the decorator below
# references _DESCRIPTION and _KWARGS_DESCRIPTION.
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'

_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'

_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase_(datasets.Metric):
    """Word-error-rate metric backed by the `jiwer` package."""

    def _info(self):
        # Fixed: `datasets.Metric` dispatches to `_info`/`_compute`; both methods
        # had collided into one obfuscated name.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level WER; jiwer expects (truth, hypothesis)."""
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            # Aggregate error and reference-word counts pair by pair so the
            # result is a corpus-level rate, not a mean of per-pair rates.
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
447
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Fixed: the import-structure dict and its conditional extensions had been
# assigned to throwaway names, leaving `_import_structure` undefined below.
_import_structure = {
    'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
    'tokenization_xlm': ['XLMTokenizer'],
}

# PyTorch objects are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlm'] = [
        'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMForMultipleChoice',
        'XLMForQuestionAnswering',
        'XLMForQuestionAnsweringSimple',
        'XLMForSequenceClassification',
        'XLMForTokenClassification',
        'XLMModel',
        'XLMPreTrainedModel',
        'XLMWithLMHeadModel',
    ]

# TensorFlow objects are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xlm'] = [
        'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLMForMultipleChoice',
        'TFXLMForQuestionAnsweringSimple',
        'TFXLMForSequenceClassification',
        'TFXLMForTokenClassification',
        'TFXLMMainLayer',
        'TFXLMModel',
        'TFXLMPreTrainedModel',
        'TFXLMWithLMHeadModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # Fixed: the lazy module must replace this module in sys.modules; the
    # original bound it to a dead local variable.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
447
1
from ...configuration_utils import PretrainedConfig


# Map of pretrained TAPAS checkpoints to their hosted config files.
UpperCamelCase = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class lowerCAmelCase_(PretrainedConfig):
    """Configuration for TAPAS models: BERT-style encoder hyperparameters plus
    the table-QA fine-tuning, cell-selection and aggregation settings.

    Fixed: all `__init__` parameters had collided into one duplicated name
    (a SyntaxError); names are reconstructed from the attribute assignments
    in the body. The base class was an undefined name; `PretrainedConfig`
    is the imported base.
    """

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
383
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    """Load the `all_results.json` written by the example script in `output_dir`.

    Raises:
        ValueError: if the results file does not exist.

    Fixed: the function was defined under an obfuscated name while the call
    site below uses `get_results`.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        # Arguments for xla_spawn followed by the run_glue.py script arguments.
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        # Fixed: patch sys.argv with the argument list (both names had been
        # collapsed into one undefined identifier).
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
383
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available snake_case : str = { 'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : List[Any] = [ 'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST', 'NezhaForNextSentencePrediction', 'NezhaForMaskedLM', 'NezhaForPreTraining', 'NezhaForMultipleChoice', 'NezhaForQuestionAnswering', 'NezhaForSequenceClassification', 'NezhaForTokenClassification', 'NezhaModel', 'NezhaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
605
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ): """simple docstring""" _SCREAMING_SNAKE_CASE = BertConfig.from_json_file(UpperCAmelCase__ ) print(f'''Building PyTorch model from configuration: {config}''' ) _SCREAMING_SNAKE_CASE = BertForPreTraining(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_bert(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() ,UpperCAmelCase__ ) if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--bert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) snake_case : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
605
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Fixed: the import-structure dict and its conditional extensions had been
# assigned to throwaway names, leaving `_import_structure` undefined below.
_import_structure = {
    'configuration_efficientnet': [
        'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientNetConfig',
        'EfficientNetOnnxConfig',
    ]
}

# The image processor requires vision dependencies.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']

# Modeling objects are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_efficientnet'] = [
        'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientNetForImageClassification',
        'EfficientNetModel',
        'EfficientNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    import sys

    # Fixed: the lazy module must replace this module in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
410
"""Fast (Rust-backed) tokenizer for REALM, a thin specialization of the BERT fast tokenizer."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# NOTE: the original contained two broken URL segments ("/aresolve/" instead of
# "/resolve/") and one broken filename ("tokenizer.jsont"); both fixed below.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}


class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Fast REALM tokenizer (WordPiece), backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it so the fast tokenizer matches the constructor arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts (optionally paired) padded to max length.

        Args:
            text: batch of candidate strings (one list of candidates per example).
            **kwargs: forwarded to ``__call__``; ``text_pair`` and
                ``return_tensors`` are popped and handled here.

        Returns:
            `BatchEncoding` with per-example lists of ``input_ids``,
            ``attention_mask`` and ``token_type_ids``.
        """
        # Candidates must all share one length so they can be stacked.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            # Tensor conversion is deferred until all candidates are collected.
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        # Drop keys the backend tokenizer did not produce.
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model input as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary to *save_directory*; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
410
1
"""simple docstring""" import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def a ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ :Optional[int] = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(_UpperCAmelCase, _UpperCAmelCase ) def a ( __snake_case : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ :str = emb.weight.shape UpperCAmelCase_ :Dict = nn.Linear(_UpperCAmelCase, _UpperCAmelCase, bias=_UpperCAmelCase ) UpperCAmelCase_ :Optional[Any] = emb.weight.data return lin_layer def a ( __snake_case : Dict ): '''simple docstring''' UpperCAmelCase_ :Dict = torch.load(_UpperCAmelCase, map_location='''cpu''' ) UpperCAmelCase_ :Tuple = mam_aaa["args"] or mam_aaa["cfg"]["model"] UpperCAmelCase_ :Union[str, Any] = mam_aaa["model"] remove_ignore_keys_(_UpperCAmelCase ) UpperCAmelCase_ :List[str] = state_dict["encoder.embed_tokens.weight"].shape[0] UpperCAmelCase_ :Any = MaMaaaConfig( vocab_size=_UpperCAmelCase, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', ) UpperCAmelCase_ :Optional[Any] = state_dict["decoder.embed_tokens.weight"] UpperCAmelCase_ :Optional[Any] = MaMaaaForConditionalGeneration(_UpperCAmelCase ) model.model.load_state_dict(_UpperCAmelCase, 
strict=_UpperCAmelCase ) UpperCAmelCase_ :Optional[int] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") __lowerCamelCase = parser.parse_args() __lowerCamelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
608
"""Lazy-import plumbing for the Transfo-XL model family (PyTorch and TensorFlow variants)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Must be named `_import_structure` and mutated in place: the obfuscated
# original bound the dict and the symbol lists to throwaway names, then
# passed the undefined `_import_structure` to `_LazyModule` (NameError).
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
461
0
"""Tokenization tests for Camembert (slow SentencePiece and fast Rust tokenizers)."""
import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin

# Fixture paths — the obfuscated original assigned these to the same throwaway
# name and then passed an undefined `_UpperCamelCase` to CamembertTokenizer.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Round-trip a single token/id pair through the vocab."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
588
"""Registry of hyperparameter-search backends (optuna / ray / sigopt / wandb)."""
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging

logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """Abstract interface a hyperparameter-search backend must implement."""

    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        """Raise a helpful error if the backend's package is not installed."""
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        # pip_package overrides name when the PyPI name differs (e.g. ray[tune]).
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


# BUGFIX: the obfuscated original named every backend class `__a`, so this
# registry referenced four undefined names.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend():
    """Return the name of the first installed backend, or raise with install hints."""
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
588
1