code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( _a, _a, _a, unittest.TestCase ): lowerCamelCase_ : Tuple = StableDiffusionInpaintPipeline lowerCamelCase_ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowerCamelCase_ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowerCamelCase_ : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowerCamelCase_ : Union[str, Any] = frozenset([] ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__magic_name__ , ) snake_case_ : Dict = PNDMScheduler(skip_prk_steps=__magic_name__ ) torch.manual_seed(0 ) snake_case_ : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) snake_case_ : int = CLIPTextConfig( 
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) snake_case_ : Any = CLIPTextModel(__magic_name__ ) snake_case_ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case_ : str = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCamelCase (self , __magic_name__ , __magic_name__=0 ) -> Optional[Any]: '''simple docstring''' snake_case_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case_ : Optional[Any] = Image.fromarray(np.uinta(__magic_name__ ) ).convert('''RGB''' ).resize((64, 64) ) snake_case_ : List[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) ) if str(__magic_name__ ).startswith('''mps''' ): snake_case_ : Any = torch.manual_seed(__magic_name__ ) else: snake_case_ : List[str] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) snake_case_ : int = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': init_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ : Dict = self.get_dummy_components() snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**__magic_name__ ) snake_case_ : List[Any] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) snake_case_ : Tuple = 
self.get_dummy_inputs(__magic_name__ ) snake_case_ : Tuple = sd_pipe(**__magic_name__ ).images snake_case_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : Tuple = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase (self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) snake_case_ : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) snake_case_ : str = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench.npy''' ) snake_case_ : Tuple = '''stabilityai/stable-diffusion-2-inpainting''' snake_case_ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__magic_name__ , safety_checker=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing() snake_case_ : List[Any] = '''Face of a yellow cat, high resolution, sitting on a park bench''' snake_case_ : Optional[Any] = torch.manual_seed(0 ) snake_case_ : List[str] = pipe( prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , generator=__magic_name__ , output_type='''np''' , ) snake_case_ : List[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9e-3 def 
lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) snake_case_ : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) snake_case_ : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' ) snake_case_ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting''' snake_case_ : int = StableDiffusionInpaintPipeline.from_pretrained( __magic_name__ , torch_dtype=torch.floataa , safety_checker=__magic_name__ , ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing() snake_case_ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench''' snake_case_ : int = torch.manual_seed(0 ) snake_case_ : Dict = pipe( prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , generator=__magic_name__ , output_type='''np''' , ) snake_case_ : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def lowerCamelCase (self ) -> Any: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) snake_case_ : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) snake_case_ : List[str] = '''stabilityai/stable-diffusion-2-inpainting''' snake_case_ : Any = PNDMScheduler.from_pretrained(__magic_name__ , subfolder='''scheduler''' ) snake_case_ : Dict = 
StableDiffusionInpaintPipeline.from_pretrained( __magic_name__ , safety_checker=__magic_name__ , scheduler=__magic_name__ , torch_dtype=torch.floataa , ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ : int = '''Face of a yellow cat, high resolution, sitting on a park bench''' snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : List[Any] = pipe( prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , generator=__magic_name__ , num_inference_steps=2 , output_type='''np''' , ) snake_case_ : Union[str, Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
279
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class __lowerCAmelCase : def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' return None class __lowerCAmelCase : def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]: '''simple docstring''' return None class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : Dict = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ ) @require_torch @slow def lowerCamelCase (self ) -> int: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ ) @require_torch @slow def lowerCamelCase (self ) -> int: '''simple docstring''' from transformers import BertModel snake_case_ : str = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words'''] with NamedTemporaryFile(mode='''w+t''' ) as vocab_file: vocab_file.write('''\n'''.join(__magic_name__ ) ) vocab_file.flush() snake_case_ : Optional[Any] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: snake_case_ : str = BertModel(BertConfig(vocab_size=len(__magic_name__ ) ) ) model.save_pretrained(__magic_name__ ) 
self._test_export(__magic_name__ , '''pt''' , 12 , __magic_name__ ) @require_tf @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: snake_case_ : Tuple = self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ ) snake_case_ : List[str] = quantize(Path(__magic_name__ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) @require_torch @slow def lowerCamelCase (self ) -> Any: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: snake_case_ : Any = self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ ) snake_case_ : Any = quantize(__magic_name__ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Tuple: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: snake_case_ : List[str] = Path(__magic_name__ ).joinpath('''model.onnx''' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) return path except Exception as e: self.fail(__magic_name__ ) @require_torch @require_tokenizers @slow def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' from transformers import BertModel snake_case_ : Optional[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) snake_case_ : int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ 
, '''pt''' ) @require_tf @require_tokenizers @slow def lowerCamelCase (self ) -> List[str]: '''simple docstring''' from transformers import TFBertModel snake_case_ : Any = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) snake_case_ : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''tf''' ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> int: '''simple docstring''' snake_case_ : Tuple = FeatureExtractionPipeline(__magic_name__ , __magic_name__ ) snake_case_ : Optional[int] = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1'''] snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = infer_shapes(__magic_name__ , __magic_name__ ) # Assert all variables are present self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __magic_name__ ) self.assertSequenceEqual(variable_names[3:] , __magic_name__ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} ) self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Tuple = ['''input_ids''', '''attention_mask''', '''token_type_ids'''] snake_case_ : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]} snake_case_ , snake_case_ : Tuple = ensure_valid_input(FuncContiguousArgs() , __magic_name__ , __magic_name__ ) # Should have exactly the same number of args (all are valid) 
self.assertEqual(len(__magic_name__ ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__magic_name__ ) , set(__magic_name__ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__magic_name__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) snake_case_ , snake_case_ : Dict = ensure_valid_input(FuncNonContiguousArgs() , __magic_name__ , __magic_name__ ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['''input_ids'''] ) self.assertEqual(ordered_input_names[0] , '''input_ids''' ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' ) self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
279
1
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: """simple docstring""" snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(_UpperCamelCase ) snake_case_ : Optional[int] = FlaxAutoModelForSeqaSeqLM.from_config(config=_UpperCamelCase ) snake_case_ : Union[str, Any] = checkpoints.load_tax_checkpoint(_UpperCamelCase ) snake_case_ : List[Any] = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": snake_case_ : Optional[Any] = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": snake_case_ : int = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case_ : Optional[Any] = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): snake_case_ : Tuple = f'''layers_{str(_UpperCamelCase )}''' # Self-Attention snake_case_ : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] snake_case_ : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] snake_case_ : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] snake_case_ : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case_ : Optional[int] = 
tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization snake_case_ : Dict = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: snake_case_ : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: snake_case_ : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] snake_case_ : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization snake_case_ : List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning snake_case_ : Union[str, Any] = flax_model.params['''encoder''']['''block'''][str(_UpperCamelCase )]['''layer'''] snake_case_ : Optional[Any] = tax_attention_key snake_case_ : Union[str, Any] = tax_attention_out snake_case_ : List[str] = tax_attention_query snake_case_ : Optional[int] = tax_attention_value snake_case_ : Optional[Any] = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case_ : str = tax_global_layer_norm if split_mlp_wi: snake_case_ : List[Any] = tax_mlp_wi_a snake_case_ : Tuple = tax_mlp_wi_a else: snake_case_ : List[Any] = tax_mlp_wi snake_case_ : Tuple = tax_mlp_wo snake_case_ : Union[str, Any] = tax_mlp_layer_norm snake_case_ : int = flax_model_encoder_layer_block # Only for layer 0: snake_case_ : Optional[int] = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T snake_case_ : Any = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case_ : Any = 
tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T snake_case_ : Optional[int] = tax_encoder_global_rel_embedding # Assigning snake_case_ : List[Any] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] snake_case_ : Union[str, Any] = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): snake_case_ : str = f'''layers_{str(_UpperCamelCase )}''' # Self-Attention snake_case_ : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] snake_case_ : Any = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] snake_case_ : str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization snake_case_ : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention snake_case_ : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] snake_case_ : Optional[Any] = tax_enc_dec_attention_module['''key''']['''kernel'''] snake_case_ : Dict = tax_enc_dec_attention_module['''out''']['''kernel'''] snake_case_ : Tuple = tax_enc_dec_attention_module['''query''']['''kernel'''] snake_case_ : Dict = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization snake_case_ : Dict = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: snake_case_ : Any = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] snake_case_ : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: snake_case_ : Tuple = 
tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization snake_case_ : Any = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning snake_case_ : str = flax_model.params['''decoder''']['''block'''][str(_UpperCamelCase )]['''layer'''] snake_case_ : List[str] = tax_attention_key snake_case_ : Optional[Any] = tax_attention_out snake_case_ : List[str] = tax_attention_query snake_case_ : Tuple = tax_attention_value snake_case_ : List[Any] = tax_pre_attention_layer_norm snake_case_ : str = tax_enc_dec_attention_key snake_case_ : Any = tax_enc_dec_attention_out snake_case_ : Union[str, Any] = tax_enc_dec_attention_query snake_case_ : Union[str, Any] = tax_enc_dec_attention_value snake_case_ : int = tax_cross_layer_norm if split_mlp_wi: snake_case_ : List[str] = tax_mlp_wi_a snake_case_ : Union[str, Any] = tax_mlp_wi_a else: snake_case_ : List[Any] = tax_mlp_wi snake_case_ : Optional[int] = tax_mlp_wo snake_case_ : Optional[int] = txa_mlp_layer_norm snake_case_ : List[str] = flax_model_decoder_layer_block # Decoder Normalization snake_case_ : str = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] snake_case_ : List[str] = txa_decoder_norm # Only for layer 0: snake_case_ : List[str] = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T snake_case_ : Optional[Any] = tax_decoder_rel_embedding # Token Embeddings snake_case_ : Dict = tax_model['''target''']['''token_embedder''']['''embedding'''] snake_case_ : Dict = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: snake_case_ : Union[str, Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(_UpperCamelCase ) print('''T5X Model was sucessfully 
converted!''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
279
lowerCAmelCase_ = { "joule": 1.0, "kilojoule": 1_0_0_0, "megajoule": 1_0_0_0_0_0_0, "gigajoule": 1_0_0_0_0_0_0_0_0_0, "wattsecond": 1.0, "watthour": 3_6_0_0, "kilowatthour": 3_6_0_0_0_0_0, "newtonmeter": 1.0, "calorie_nutr": 4_1_8_6.8, "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0, "electronvolt": 1.602_176_634e-19, "britishthermalunit_it": 1_0_5_5.0_5_5_8_5, "footpound": 1.355_818, } def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: snake_case_ : str = ( f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' f'''Valid values are: {", ".join(_UpperCamelCase )}''' ) raise ValueError(_UpperCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
279
1
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar('''KEY''') lowerCAmelCase_ = TypeVar('''VAL''') @dataclass(frozen=_a, slots=_a ) class __lowerCAmelCase ( Generic[KEY, VAL] ): lowerCamelCase_ : KEY lowerCamelCase_ : VAL class __lowerCAmelCase ( _Item ): def __init__(self ) -> None: '''simple docstring''' super().__init__(__magic_name__ , __magic_name__ ) def __bool__(self ) -> bool: '''simple docstring''' return False lowerCAmelCase_ = _DeletedItem() class __lowerCAmelCase ( MutableMapping[KEY, VAL] ): def __init__(self , __magic_name__ = 8 , __magic_name__ = 0.75 ) -> None: '''simple docstring''' snake_case_ : Any = initial_block_size snake_case_ : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 snake_case_ : List[Any] = capacity_factor snake_case_ : int = 0 def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' return hash(__magic_name__ ) % len(self._buckets ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' return (ind + 1) % len(self._buckets ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> bool: '''simple docstring''' snake_case_ : Any = self._buckets[ind] if not stored: snake_case_ : Any = _Item(__magic_name__ , __magic_name__ ) self._len += 1 return True elif stored.key == key: snake_case_ : Any = _Item(__magic_name__ , __magic_name__ ) return True else: return False def lowerCamelCase (self ) -> bool: '''simple docstring''' snake_case_ : int = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__magic_name__ ) def lowerCamelCase (self ) -> bool: '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False snake_case_ : Dict = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def lowerCamelCase (self , __magic_name__ ) -> None: '''simple docstring''' snake_case_ : Union[str, 
Any] = self._buckets snake_case_ : Tuple = [None] * new_size snake_case_ : Dict = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def lowerCamelCase (self ) -> None: '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def lowerCamelCase (self ) -> None: '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def lowerCamelCase (self , __magic_name__ ) -> Iterator[int]: '''simple docstring''' snake_case_ : Dict = self._get_bucket_index(__magic_name__ ) for _ in range(len(self._buckets ) ): yield ind snake_case_ : Dict = self._get_next_ind(__magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None: '''simple docstring''' for ind in self._iterate_buckets(__magic_name__ ): if self._try_set(__magic_name__ , __magic_name__ , __magic_name__ ): break def __setitem__(self , __magic_name__ , __magic_name__ ) -> None: '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__magic_name__ , __magic_name__ ) def __delitem__(self , __magic_name__ ) -> None: '''simple docstring''' for ind in self._iterate_buckets(__magic_name__ ): snake_case_ : List[Any] = self._buckets[ind] if item is None: raise KeyError(__magic_name__ ) if item is _deleted: continue if item.key == key: snake_case_ : Union[str, Any] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__(self , __magic_name__ ) -> VAL: '''simple docstring''' for ind in self._iterate_buckets(__magic_name__ ): snake_case_ : str = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__magic_name__ ) def __len__(self ) -> int: '''simple docstring''' return self._len def __iter__(self ) -> Iterator[KEY]: '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__(self ) -> str: '''simple docstring''' snake_case_ : List[Any] = ''' ,'''.join( F'''{item.key}: {item.val}''' for item in self._buckets if item ) return 
F'''HashMap({val_string})'''
279
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


# NOTE(review): every module-level constant in this file is bound to the SAME
# obfuscated name ``lowerCAmelCase_`` (logger, citation, description, kwargs
# description, checkpoint map), yet the code below reads the original names
# (``logger``, ``_DESCRIPTION``, ``_KWARGS_DESCRIPTION``, ``_CITATION``,
# ``CHECKPOINT_URLS``).  As written the module raises NameError at
# class-definition time; the original bindings need to be restored -- TODO.

# Module logger (originally ``logger``).
lowerCAmelCase_ = datasets.logging.get_logger(__name__)

# BibTeX citation for the BLEURT paper (originally ``_CITATION``).
lowerCAmelCase_ = '''\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
'''

# Human-readable metric description (originally ``_DESCRIPTION``).
lowerCAmelCase_ = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''

# Keyword-arguments documentation shown by ``datasets`` (originally
# ``_KWARGS_DESCRIPTION``).
lowerCAmelCase_ = '''
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    \'scores\': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
'''

# Name -> download URL for every published BLEURT checkpoint (originally
# ``CHECKPOINT_URLS``).
lowerCAmelCase_ = {
    '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
    '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
    '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
    '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
    '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
    '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
    '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
    '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
    '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
    '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    # NOTE(review): the three methods below were presumably ``_info``,
    # ``_download_and_prepare`` and ``_compute``; they all share the obfuscated
    # name ``lowerCamelCase``, so only the LAST definition survives on the
    # class.  Also note the bodies assign results to the throwaway name
    # ``snake_case_`` while subsequent lines read the original local names
    # (``checkpoint_name``, ``scores``) and ``dl_manager`` -- the original
    # names must be restored for the metric to work.

    def lowerCamelCase (self ) -> Optional[int]:
        '''Describe the metric: string predictions/references in, float scores out.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage='''https://github.com/google-research/bleurt''' ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) ,
            codebase_urls=['''https://github.com/google-research/bleurt'''] ,
            reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )

    def lowerCamelCase (self , __magic_name__ ) -> List[Any]:
        '''Resolve ``self.config_name`` to a checkpoint, download it and build the scorer.'''
        # Fall back to the small base checkpoint when no config was chosen.
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            snake_case_ : Dict = '''bleurt-base-128'''

        # Checkpoint names are case-sensitive in CHECKPOINT_URLS; accept either
        # all-lower or all-upper spellings of the configured name.
        if self.config_name.lower() in CHECKPOINT_URLS:
            snake_case_ : Optional[int] = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            snake_case_ : Union[str, Any] = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )

        # download the model checkpoint specified by self.config_name and set up the scorer
        snake_case_ : Any = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        snake_case_ : Dict = score.BleurtScorer(os.path.join(__magic_name__ , __magic_name__ ) )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
        '''Score candidates against references with the loaded BLEURT scorer.'''
        snake_case_ : Dict = self.scorer.score(references=__magic_name__ , candidates=__magic_name__ )
        return {"scores": scores}
279
1
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig


# NOTE(review): this file has been mechanically obfuscated -- every local
# assignment target was rewritten to the single throwaway name ``snake_case_``
# while the following lines still READ the original local names (``backbone``,
# ``pointer``, ``tf_to_pt_map``, ``init_vars``, ``depth``, ``strides``,
# ``logits``, ...), and all module docstring constants share the one name
# ``lowerCAmelCase_``.  As written, almost every function raises NameError at
# run time; the original local names must be restored.  The comments below
# describe the clearly intended behavior -- TODO confirm against the
# un-obfuscated upstream source.
lowerCAmelCase_ = logging.get_logger(__name__)

# General docstring
lowerCAmelCase_ = '''MobileNetV1Config'''

# Base docstring
lowerCAmelCase_ = '''google/mobilenet_v1_1.0_224'''
lowerCAmelCase_ = [1, 1_0_2_4, 7, 7]

# Image classification docstring
lowerCAmelCase_ = '''google/mobilenet_v1_1.0_224'''
lowerCAmelCase_ = '''tabby, tabby cat'''

lowerCAmelCase_ = [
    '''google/mobilenet_v1_1.0_224''',
    '''google/mobilenet_v1_0.75_192''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> str:
    """
    Build a mapping from TensorFlow MobilenetV1 variable prefixes to the
    corresponding PyTorch parameters of ``model`` (conv stem, 13 depthwise +
    pointwise layer pairs, and -- for classification models -- the logits head).
    """
    snake_case_ : Union[str, Any] = {}

    # When the model wraps a backbone (classification head on top), map into
    # ``model.mobilenet_va``; otherwise the model IS the backbone.
    if isinstance(_UpperCamelCase , _UpperCamelCase ):
        snake_case_ : Optional[Any] = model.mobilenet_va
    else:
        snake_case_ : Optional[Any] = model

    # Stem convolution + batch-norm statistics.
    snake_case_ : Optional[int] = '''MobilenetV1/Conv2d_0/'''
    snake_case_ : List[str] = backbone.conv_stem.convolution.weight
    snake_case_ : Any = backbone.conv_stem.normalization.bias
    snake_case_ : List[str] = backbone.conv_stem.normalization.weight
    snake_case_ : Dict = backbone.conv_stem.normalization.running_mean
    snake_case_ : Optional[int] = backbone.conv_stem.normalization.running_var

    # TF layer i+1 corresponds to the PyTorch pair (2*i, 2*i + 1):
    # depthwise conv first, then the pointwise 1x1 conv.
    for i in range(13 ):
        snake_case_ : str = i + 1
        snake_case_ : List[Any] = i * 2

        snake_case_ : Tuple = backbone.layer[pt_index]
        snake_case_ : List[str] = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        snake_case_ : int = pointer.convolution.weight
        snake_case_ : Tuple = pointer.normalization.bias
        snake_case_ : Optional[int] = pointer.normalization.weight
        snake_case_ : int = pointer.normalization.running_mean
        snake_case_ : List[Any] = pointer.normalization.running_var

        snake_case_ : List[Any] = backbone.layer[pt_index + 1]
        snake_case_ : Dict = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        snake_case_ : List[Any] = pointer.convolution.weight
        snake_case_ : Optional[int] = pointer.normalization.bias
        snake_case_ : List[str] = pointer.normalization.weight
        snake_case_ : Union[str, Any] = pointer.normalization.running_mean
        snake_case_ : Optional[Any] = pointer.normalization.running_var

    # Classification head, only present on the classification model.
    if isinstance(_UpperCamelCase , _UpperCamelCase ):
        snake_case_ : Optional[int] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
        snake_case_ : int = model.classifier.weight
        snake_case_ : Tuple = model.classifier.bias

    return tf_to_pt_map


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
    """
    Load TensorFlow checkpoint variables into a PyTorch MobileNetV1 model,
    transposing conv/depthwise weights into PyTorch layout and skipping
    optimizer-only variables (RMSProp slots, EMA copies).
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.''' )
        raise

    # Load weights from TF model
    snake_case_ : List[str] = tf.train.list_variables(_UpperCamelCase )
    snake_case_ : Dict = {}
    for name, shape in init_vars:
        logger.info(f'''Loading TF weight {name} with shape {shape}''' )
        snake_case_ : Dict = tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
        snake_case_ : Optional[int] = array

    # Build TF to PyTorch weights loading map
    snake_case_ : str = _build_tf_to_pytorch_map(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

    for name, pointer in tf_to_pt_map.items():
        logger.info(f'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
            continue
        snake_case_ : str = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''' )
            # TF depthwise layout (H, W, in, mult) -> PyTorch (in, mult, H, W).
            snake_case_ : List[str] = np.transpose(_UpperCamelCase , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('''Transposing''' )
            if len(pointer.shape ) == 2:  # copying into linear layer
                snake_case_ : int = array.squeeze().transpose()
            else:
                # TF conv layout (H, W, in, out) -> PyTorch (out, in, H, W).
                snake_case_ : Union[str, Any] = np.transpose(_UpperCamelCase , (3, 2, 0, 1) )

        if pointer.shape != array.shape:
            raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )

        logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
        snake_case_ : Any = torch.from_numpy(_UpperCamelCase )

        # Drop the copied weight and its optimizer-only companions so the
        # final log line lists only genuinely unmapped variables.
        tf_weights.pop(_UpperCamelCase , _UpperCamelCase )
        tf_weights.pop(name + '''/RMSProp''' , _UpperCamelCase )
        tf_weights.pop(name + '''/RMSProp_1''' , _UpperCamelCase )
        tf_weights.pop(name + '''/ExponentialMovingAverage''' , _UpperCamelCase )

    logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
    return model


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to ``features`` for ``conv_layer``:
    the total padding depends on whether the input size is a multiple of the
    stride, and any odd remainder goes on the bottom/right side.
    """
    snake_case_ , snake_case_ : Any = features.shape[-2:]
    snake_case_ , snake_case_ : str = conv_layer.stride
    snake_case_ , snake_case_ : List[Any] = conv_layer.kernel_size

    if in_height % stride_height == 0:
        snake_case_ : Tuple = max(kernel_height - stride_height , 0 )
    else:
        snake_case_ : Union[str, Any] = max(kernel_height - (in_height % stride_height) , 0 )

    if in_width % stride_width == 0:
        snake_case_ : Optional[int] = max(kernel_width - stride_width , 0 )
    else:
        snake_case_ : Dict = max(kernel_width - (in_width % stride_width) , 0 )

    snake_case_ : Union[str, Any] = pad_along_width // 2
    snake_case_ : Optional[Any] = pad_along_width - pad_left
    snake_case_ : Optional[Any] = pad_along_height // 2
    snake_case_ : Union[str, Any] = pad_along_height - pad_top

    snake_case_ : Tuple = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(_UpperCamelCase , _UpperCamelCase , '''constant''' , 0.0 )


class __lowerCAmelCase ( nn.Module ):
    # Conv + optional batch-norm + optional activation building block
    # (presumably the original ``MobileNetV1ConvLayer``).

    def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 1 , __magic_name__ = 1 , __magic_name__ = False , __magic_name__ = True , __magic_name__ = True , ) -> None:
        '''Configure convolution, normalization and activation for one block.'''
        super().__init__()
        snake_case_ : Optional[Any] = config

        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )

        # With TF-style padding the padding is applied dynamically in forward;
        # otherwise use static symmetric padding.
        snake_case_ : List[str] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )

        snake_case_ : List[str] = nn.Convad(
            in_channels=__magic_name__ ,
            out_channels=__magic_name__ ,
            kernel_size=__magic_name__ ,
            stride=__magic_name__ ,
            padding=__magic_name__ ,
            groups=__magic_name__ ,
            bias=__magic_name__ ,
            padding_mode='''zeros''' , )

        if use_normalization:
            snake_case_ : Tuple = nn.BatchNormad(
                num_features=__magic_name__ ,
                eps=config.layer_norm_eps ,
                momentum=0.9_997 ,
                affine=__magic_name__ ,
                track_running_stats=__magic_name__ , )
        else:
            snake_case_ : str = None

        # Activation may be given per-layer (string), fall back to the
        # config-wide hidden activation, or be disabled entirely.
        if use_activation:
            if isinstance(__magic_name__ , __magic_name__ ):
                snake_case_ : Optional[Any] = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , __magic_name__ ):
                snake_case_ : Any = ACTaFN[config.hidden_act]
            else:
                snake_case_ : List[str] = config.hidden_act
        else:
            snake_case_ : Any = None

    def lowerCamelCase (self , __magic_name__ ) -> torch.Tensor:
        '''Run conv -> (optional) batch norm -> (optional) activation.'''
        if self.config.tf_padding:
            snake_case_ : Union[str, Any] = apply_tf_padding(__magic_name__ , self.convolution )
        snake_case_ : Union[str, Any] = self.convolution(__magic_name__ )
        if self.normalization is not None:
            snake_case_ : int = self.normalization(__magic_name__ )
        if self.activation is not None:
            snake_case_ : List[Any] = self.activation(__magic_name__ )
        return features


class __lowerCAmelCase ( _a ):
    # Base class wiring config / TF-weight loading / weight init
    # (presumably the original ``MobileNetV1PreTrainedModel``).
    lowerCamelCase_ : Tuple = MobileNetVaConfig
    lowerCamelCase_ : Optional[int] = load_tf_weights_in_mobilenet_va
    lowerCamelCase_ : int = '''mobilenet_v1'''
    lowerCamelCase_ : str = '''pixel_values'''
    lowerCamelCase_ : List[str] = False

    def lowerCamelCase (self , __magic_name__ ) -> None:
        '''Initialize conv/linear weights normally and batch-norm to identity.'''
        if isinstance(__magic_name__ , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(__magic_name__ , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )


lowerCAmelCase_ = r'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

lowerCAmelCase_ = r'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' ,
    _a ,
)
class __lowerCAmelCase ( _a ):
    def __init__(self , __magic_name__ , __magic_name__ = True ) -> List[Any]:
        '''Build the conv stem plus 13 depthwise/pointwise layer pairs and an optional pooler.'''
        super().__init__(__magic_name__ )
        snake_case_ : Union[str, Any] = config

        # Base channel width before the config's depth multiplier is applied.
        snake_case_ : str = 32
        snake_case_ : Optional[int] = max(int(depth * config.depth_multiplier ) , config.min_depth )

        snake_case_ : List[str] = MobileNetVaConvLayer(
            __magic_name__ ,
            in_channels=config.num_channels ,
            out_channels=__magic_name__ ,
            kernel_size=3 ,
            stride=2 , )

        # Per-pair strides for the 13 depthwise convolutions.
        snake_case_ : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        snake_case_ : Union[str, Any] = nn.ModuleList()
        for i in range(13 ):
            snake_case_ : List[str] = out_channels
            # Channel width doubles at each strided stage (and after the stem).
            if strides[i] == 2 or i == 0:
                depth *= 2
                snake_case_ : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth )

            # Depthwise 3x3 conv (groups == in_channels) ...
            self.layer.append(
                MobileNetVaConvLayer(
                    __magic_name__ ,
                    in_channels=__magic_name__ ,
                    out_channels=__magic_name__ ,
                    kernel_size=3 ,
                    stride=strides[i] ,
                    groups=__magic_name__ , ) )
            # ... followed by a pointwise 1x1 conv.
            self.layer.append(
                MobileNetVaConvLayer(
                    __magic_name__ ,
                    in_channels=__magic_name__ ,
                    out_channels=__magic_name__ ,
                    kernel_size=1 , ) )

        snake_case_ : List[str] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def lowerCamelCase (self , __magic_name__ ) -> Dict:
        '''Head pruning is not supported for MobileNetV1.'''
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(__magic_name__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,
        output_type=__magic_name__ ,
        config_class=_CONFIG_FOR_DOC ,
        modality='''vision''' ,
        expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def lowerCamelCase (self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        '''Run the backbone; optionally collect per-layer hidden states and a pooled output.'''
        snake_case_ : Tuple = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )

        snake_case_ : Union[str, Any] = self.conv_stem(__magic_name__ )

        snake_case_ : Union[str, Any] = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer ):
            snake_case_ : List[str] = layer_module(__magic_name__ )

            if output_hidden_states:
                snake_case_ : Union[str, Any] = all_hidden_states + (hidden_states,)

        snake_case_ : Any = hidden_states

        if self.pooler is not None:
            snake_case_ : Optional[int] = torch.flatten(self.pooler(__magic_name__ ) , start_dim=1 )
        else:
            snake_case_ : Tuple = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=__magic_name__ ,
            pooler_output=__magic_name__ ,
            hidden_states=__magic_name__ , )


@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' ,
    _a ,
)
class __lowerCAmelCase ( _a ):
    def __init__(self , __magic_name__ ) -> None:
        '''Backbone + dropout + linear classifier (Identity when num_labels == 0).'''
        super().__init__(__magic_name__ )

        snake_case_ : Dict = config.num_labels
        snake_case_ : List[str] = MobileNetVaModel(__magic_name__ )

        snake_case_ : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        snake_case_ : Union[str, Any] = nn.Dropout(config.classifier_dropout_prob , inplace=__magic_name__ )
        snake_case_ : Any = nn.Linear(__magic_name__ , config.num_labels ) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(__magic_name__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,
        output_type=__magic_name__ ,
        config_class=_CONFIG_FOR_DOC ,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def lowerCamelCase (self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        '''Classify the pooled backbone features; compute a loss when labels are given.'''
        snake_case_ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict

        snake_case_ : Any = self.mobilenet_va(__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ )

        snake_case_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]

        snake_case_ : List[Any] = self.classifier(self.dropout(__magic_name__ ) )

        snake_case_ : Optional[Any] = None
        if labels is not None:
            # Infer the problem type from num_labels / label dtype, matching
            # the convention used by other transformers classification heads.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    snake_case_ : List[str] = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    snake_case_ : List[str] = '''single_label_classification'''
                else:
                    snake_case_ : Optional[Any] = '''multi_label_classification'''

            if self.config.problem_type == "regression":
                snake_case_ : Optional[Any] = MSELoss()
                if self.num_labels == 1:
                    snake_case_ : str = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    snake_case_ : str = loss_fct(__magic_name__ , __magic_name__ )
            elif self.config.problem_type == "single_label_classification":
                snake_case_ : List[str] = CrossEntropyLoss()
                snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                snake_case_ : Tuple = BCEWithLogitsLoss()
                snake_case_ : Optional[Any] = loss_fct(__magic_name__ , __magic_name__ )

        if not return_dict:
            snake_case_ : int = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=__magic_name__ ,
            logits=__magic_name__ ,
            hidden_states=outputs.hidden_states , )
279
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

# Map interpolation names to the PIL resampling filters; Pillow >= 9.1.0 moved
# the filters into the ``Image.Resampling`` enum.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    lowerCAmelCase_ = {
        '''linear''': PIL.Image.Resampling.BILINEAR,
        '''bilinear''': PIL.Image.Resampling.BILINEAR,
        '''bicubic''': PIL.Image.Resampling.BICUBIC,
        '''lanczos''': PIL.Image.Resampling.LANCZOS,
        '''nearest''': PIL.Image.Resampling.NEAREST,
    }
else:
    lowerCAmelCase_ = {
        '''linear''': PIL.Image.LINEAR,
        '''bilinear''': PIL.Image.BILINEAR,
        '''bicubic''': PIL.Image.BICUBIC,
        '''lanczos''': PIL.Image.LANCZOS,
        '''nearest''': PIL.Image.NEAREST,
    }


def lowerCamelCase_ ( _UpperCamelCase ):
    """
    Post-process a batch of diffusion-model outputs into PIL images.

    Args:
        _UpperCamelCase: a ``torch.Tensor`` of shape (batch, channels, height,
            width) with values in [-1, 1].

    Returns:
        list of ``PIL.Image.Image``.

    Bug fixes vs. the previous revision: every intermediate result was assigned
    to a throwaway name (so the clamp/permute were discarded and the raw input
    was returned), and the ``-> Optional[int]`` annotation referenced
    ``Optional`` without importing ``typing`` (NameError at import time).
    """
    images = (_UpperCamelCase / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    return numpy_to_pil(images )


def lowerCamelCase_ ( _UpperCamelCase ):
    """
    Convert a numpy image (or batch of images) with values in [0, 1] and
    channels-last layout to a list of ``PIL.Image.Image``.
    """
    images = _UpperCamelCase
    if images.ndim == 3:
        # Promote a single image to a batch of one.
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images


# Both helpers above carry the same obfuscated name, so the first one's call to
# ``numpy_to_pil`` could never resolve; bind the alias here (module globals are
# looked up at call time) so the tensor post-processing helper works.
numpy_to_pil = lowerCamelCase_
279
1
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
    """
    Convert an integer to its binary representation prefixed with "0b"
    (or "-0b" for negative numbers), mirroring the built-in ``bin()``.

    >>> lowerCamelCase_(0)
    '0b0'
    >>> lowerCamelCase_(5)
    '0b101'
    >>> lowerCamelCase_(-5)
    '-0b101'

    Raises:
        TypeError: if the argument is a ``float`` or a ``str``.
    """
    # Bug fix: the previous revision called ``isinstance(num, num)`` -- the
    # second argument must be a type -- and assigned every intermediate to a
    # throwaway name while reading undefined names (``negative``, ``binary``),
    # so it could never run.
    if isinstance(_UpperCamelCase , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(_UpperCamelCase , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )

    num = _UpperCamelCase
    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    # Collect bits least-significant first, then reverse once at the end
    # (cheaper than repeated insert(0, ...)).
    bits: list[str] = []
    while num > 0:
        bits.append(str(num % 2 ) )
        num >>= 1
    bits.reverse()

    return ("-0b" if negative else "0b") + "".join(bits )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class __lowerCAmelCase ( _a, unittest.TestCase ):
    # NOTE(review): this test class has been mechanically obfuscated.  All four
    # test methods share the name ``lowerCamelCase`` (so only the last survives
    # on the class), locals are assigned to the throwaway name ``snake_case_``
    # while later lines read the original names (``vocab_tokens``, ``merges``,
    # ``tokens``, ``text``, ...), and ``setUp`` references ``__magic_name__``
    # although it takes only ``self``.  The original method/local names
    # (``setUp``, ``get_input_output_texts``, ``test_full_tokenizer``,
    # ``test_sequence_builders``) need restoring -- TODO confirm upstream.

    # Tokenizer class under test and whether a fast tokenizer exists.
    lowerCamelCase_ : Any = BioGptTokenizer
    lowerCamelCase_ : Optional[Any] = False

    def lowerCamelCase (self ) -> List[Any]:
        '''Write a tiny BPE vocab and merges file into the temp test directory.'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ : Optional[Any] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
        snake_case_ : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

        snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(__magic_name__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(__magic_name__ ) )

    def lowerCamelCase (self , __magic_name__ ) -> int:
        '''Return a (raw input, expected output) text pair for round-trip tests.'''
        snake_case_ : str = '''lower newer'''
        snake_case_ : Dict = '''lower newer'''
        return input_text, output_text

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Tokenize with the tiny vocab and check tokens and their ids (unknowns map to <unk>).'''
        snake_case_ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )

        snake_case_ : Union[str, Any] = '''lower'''
        snake_case_ : Optional[int] = ['''low''', '''er</w>''']
        snake_case_ : Any = tokenizer.tokenize(__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )

        snake_case_ : Optional[int] = tokens + ['''<unk>''']
        snake_case_ : List[str] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )

    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        '''Check special-token insertion ([2] == BOS) against the released microsoft/biogpt checkpoint.'''
        snake_case_ : Optional[int] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )

        snake_case_ : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__magic_name__ )
        snake_case_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__magic_name__ )

        snake_case_ : str = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
        snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )

        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
279
1
import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

# Validation schema: which sections a dataset README must contain and whether
# they may be empty.  (Previously every constant in this module was assigned
# to the same name `lowerCAmelCase_`, so the names referenced by the
# parametrized tests below did not exist; restored here.)
example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X"  # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
"""
)


CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}


README_CORRECT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}


README_EMPTY_YAML = """\
---
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."

README_MISSING_TEXT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
"""

EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

README_MISSING_CONTENT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""

EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."

README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---

## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

README_EMPTY = ""

EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."

README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    """A well-formed README parses into the expected structure dict."""
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """Structurally invalid READMEs raise ValueError on validate().

    Fix: pytest.raises previously received the readme string (via the
    duplicated `_UpperCamelCase` parameter) instead of the exception type.
    """
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """Unparseable READMEs raise already at construction time."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """suppress_parsing_errors=True swallows the parsing failure."""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    """Same as the from_string round-trip, but going through a file on disk."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    """Validation errors report the actual file path."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """Parsing errors from a file also report the file path."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """suppress_parsing_errors=True swallows the parsing failure for files too."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
279
from __future__ import annotations


def lowerCamelCase_(value, weight, capacity) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken in decreasing value/weight ratio; the last item that does
    not fit is taken fractionally.

    Fixes over the previous revision (all three parameters shared the name
    `_UpperCamelCase`, a SyntaxError): the sort key lambda now binds its own
    index instead of the undefined name `i`, `reverse=True` is explicit
    (it was bound to `capacity`), and the per-item fractions are actually
    stored in `fractions[i]`.

    Args:
        value: profit of each item.
        weight: weight of each item (same length as `value`).
        capacity: total weight the knapsack can hold.

    Returns:
        (max_value, fractions): the best achievable total value and, per item,
        the fraction (0..1) of that item taken.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Most valuable per unit of weight first.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Item fits entirely.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


lowerCAmelCase_ = logging.get_logger(__name__)


class __lowerCAmelCase(BaseImageProcessor):
    """Image processor: shortest-edge resize, center crop, rescale and normalize.

    Fixes over the previous revision:
    - the base class was the undefined name `_a` (now `BaseImageProcessor`);
    - every method and parameter was named `__magic_name__`/`lowerCamelCase`,
      which duplicated parameter names (SyntaxError) and broke the
      `self.resize` / `self.center_crop` / `self.rescale` / `self.normalize`
      calls made from `preprocess`;
    - configuration values are now actually stored on `self`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline over one image or a batch.

        Per-call arguments override the instance defaults set in __init__.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
279
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    """Configuration holder used to build GLPNImageProcessor instances in tests.

    Renamed from the anonymous placeholder class: the test class below
    instantiates `GLPNImageProcessingTester`, which previously did not exist.
    The duplicated `__magic_name__` parameters (a SyntaxError) are restored to
    their real names.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Kwargs for GLPNImageProcessor mirroring this tester's settings."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class __lowerCAmelCase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """GLPNImageProcessor tests: PIL / NumPy / torch inputs, size divisibility."""

    # The mixin reads `image_processing_class`; the base class was the
    # undefined name `_a`.
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        # Fix: the tester must be stored on `self`; every test below reads
        # `self.image_processor_tester`.
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        # Intentionally empty: GLPNImageProcessor does not support batching.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
279
1
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for BioGptTokenizer (BPE vocab with `</w>` end-of-word markers).

    Fixes over the previous revision:
    - the base class was the undefined name `_a`; the imported
      TokenizerTesterMixin is the intended mixin;
    - the mixin reads `tokenizer_class` / `test_rust_tokenizer`, so the class
      attributes must carry those names;
    - `self.vocab_file` / `self.merges_file` were read but never assigned;
    - several assertions referenced the undefined name `__magic_name__`.
    """

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        # The paths must live on `self`: they are reopened below and by the
        # tokenizer constructor in test_full_tokenizer.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Return a (raw text, expected detokenized text) pair for the mixin."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenize a word and map tokens (plus `<unk>`) to their ids."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        # ids: "low" -> 14, "er</w>" -> 15, "<unk>" -> 20 (see vocab order above)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """`build_inputs_with_special_tokens` prefixes each sequence with </s> (id 2)."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
279
from ...utils import (
    OptionalDependencyNotAvailable,
    is_transformers_available,
    is_torch_available,
    is_transformers_version,
)


# UnCLIP requires both torch and transformers >= 4.25.0 at runtime.  If either
# requirement is missing, fall back to the dummy objects (which presumably
# raise an informative error when used — see dummy_torch_and_transformers_objects)
# instead of failing at import time.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder pipelines exposed when the optional dependencies are absent.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Real implementations: the two public pipelines plus the text-projection
    # model they share.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
279
1
# Lazy-import init for the ViT-MSN model: submodules are imported only on
# first attribute access (or eagerly for static type checkers).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it provides.  This mapping is what
# _LazyModule consumes below, so it must be bound to `_import_structure`
# (the previous code bound it to a throwaway name and then referenced
# `_import_structure`, which raised NameError on import).
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch the modeling module cannot be imported; expose config only.
    pass
else:
    _import_structure['''modeling_vit_msn'''] = [
        '''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMSNModel''',
        '''ViTMSNForImageClassification''',
        '''ViTMSNPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with the lazy proxy so attribute access
    # triggers on-demand submodule imports (binding it to a local variable,
    # as the previous code did, left the proxy unused).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
279
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#

import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean

import pandas as pd
import torch
from tqdm import tqdm

import transformers


# Sentinel metric value for failed runs.
nan = float("nan")


class Tee:
    """Duplicate everything written to stdout into a log file.

    NOTE: the previous version of this file defined this class and every
    function below under one shared obfuscated name while all call sites used
    the real names, so the script crashed with NameError; the names used at
    the call sites are restored here.
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # Delegate everything else (flush, isatty, ...) to the real stdout.
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes (anything before a carriage return) from the file copy
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Return the command line this script was launched with, shell-quoted and
    wrapped to `max_width` columns with `\\` line continuations.

    Args:
        max_width: maximum width of each rendered line.
        full_python_path: whether to keep the full path of the python executable.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    """Normalize `args.base_cmd` into an argv list, forcing this benchmark's
    own --output_dir and ensuring --overwrite_output_dir is present."""
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark subprocess and return the wanted metrics read from
    output_dir/all_results.json; the target metric is nan on failure."""
    # Flip to `if 1:` to debug everything but the run itself with fast fake results.
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation `repeat_times` times and return its averaged metrics
    (the target metric is nan when every repeat failed)."""
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r erases the tqdm progress line before printing the outcome.
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            # previous code rounded an unrelated name instead of each result
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    """Report software versions and GPU hardware for the report footer."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Assemble the per-variation results into a table, add a diff-% column
    against the baseline, and print github- and console-flavored reports."""
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    # Install the tee so everything printed below also lands in the report file
    # (previously the Tee instance was created but never installed).
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
279
1
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class __lowerCAmelCase ( _a, unittest.TestCase ):
    # Slow and fast tokenizer classes under test; MVP reuses the RoBERTa
    # byte-level BPE file format.
    lowerCamelCase_ : Union[str, Any] = MvpTokenizer
    lowerCamelCase_ : List[Any] = MvpTokenizerFast
    lowerCamelCase_ : Dict = True
    lowerCamelCase_ : Union[str, Any] = filter_roberta_detectors

    def lowerCamelCase (self ) -> List[Any]:
        '''Write a tiny byte-level BPE vocab + merges pair into the tmp dir so
        tokenizers can be instantiated locally.  \\u0120 is the GPT-2 style
        space marker used by byte-level BPE.'''
        super().setUp()
        snake_case_ : int = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        snake_case_ : List[str] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
        snake_case_ : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case_ : Dict = {'''unk_token''': '''<unk>'''}

        snake_case_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__magic_name__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__magic_name__ ) )

    def lowerCamelCase (self , **__magic_name__ ) -> Optional[Any]:
        '''Build a slow tokenizer from the tmp-dir fixture files.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )

    def lowerCamelCase (self , **__magic_name__ ) -> List[str]:
        '''Build a fast (Rust) tokenizer from the tmp-dir fixture files.'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> Tuple:
        '''Return (input_text, expected_output_text) for the shared round-trip tests.'''
        return "lower newer", "lower newer"

    @cached_property
    def lowerCamelCase (self ) -> Optional[int]:
        # Real checkpoint; cached so the download happens at most once.
        return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )

    @cached_property
    def lowerCamelCase (self ) -> List[str]:
        # Fast-tokenizer counterpart of the checkpoint above.
        return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )

    @require_torch
    def lowerCamelCase (self ) -> List[str]:
        '''Batch-encode two sentences as pt tensors and pin exact ids/shapes.'''
        snake_case_ : List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        snake_case_ : List[str] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case_ : List[Any] = tokenizer(__magic_name__ , max_length=len(__magic_name__ ) , padding=__magic_name__ , return_tensors='''pt''' )
            self.assertIsInstance(__magic_name__ , __magic_name__ )

            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            snake_case_ : Optional[int] = batch.input_ids.tolist()[0]
            self.assertListEqual(__magic_name__ , __magic_name__ )
            # Test that special tokens are reset

    @require_torch
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Encoding without targets must not emit label/decoder keys.'''
        snake_case_ : Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case_ : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors='''pt''' )
            # check if input_ids are returned and no labels
            self.assertIn('''input_ids''' , __magic_name__ )
            self.assertIn('''attention_mask''' , __magic_name__ )
            self.assertNotIn('''labels''' , __magic_name__ )
            self.assertNotIn('''decoder_attention_mask''' , __magic_name__ )

    @require_torch
    def lowerCamelCase (self ) -> Any:
        '''Target-side encoding honors max_length padding.'''
        snake_case_ : List[Any] = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case_ : Tuple = tokenizer(text_target=__magic_name__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    @require_torch
    def lowerCamelCase (self ) -> List[Any]:
        '''An over-long input is truncated to the tokenizer's model max length (1024).'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case_ : Union[str, Any] = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__magic_name__ , truncation=__magic_name__ , return_tensors='''pt''' )
            self.assertIsInstance(__magic_name__ , __magic_name__ )
            self.assertEqual(batch.input_ids.shape , (2, 1024) )

    @require_torch
    def lowerCamelCase (self ) -> List[str]:
        '''Both inputs and labels are wrapped with bos (<s>) and eos (</s>).'''
        snake_case_ : Dict = ['''A long paragraph for summarization.''']
        snake_case_ : List[Any] = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            snake_case_ : str = tokenizer(__magic_name__ , text_target=__magic_name__ , return_tensors='''pt''' )

            snake_case_ : Tuple = inputs['''input_ids''']
            snake_case_ : Tuple = inputs['''labels''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    def lowerCamelCase (self ) -> Optional[int]:
        # Intentionally disabled inherited test.
        pass

    def lowerCamelCase (self ) -> List[Any]:
        '''Slow vs fast tokenizers must agree on a sentence containing <mask>;
        exact ids and token strings are pinned for both implementations.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
                snake_case_ : Dict = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
                snake_case_ : Optional[int] = '''A, <mask> AllenNLP sentence.'''
                snake_case_ : Optional[int] = tokenizer_r.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ , return_token_type_ids=__magic_name__ )
                snake_case_ : int = tokenizer_p.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ , return_token_type_ids=__magic_name__ )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,
                    sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,
                )

                snake_case_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                snake_case_ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )

                self.assertSequenceEqual(
                    __magic_name__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    __magic_name__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
279
# Convert a pretrained Karlo UnCLIP text2img checkpoint into an
# UnCLIPImageVariationPipeline (reusing its decoder / super-res components and
# adding a CLIP image encoder) and save the result to --dump_path.
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # argparse stores `--txt2img_unclip` as `args.txt2img_unclip`; the previous
    # code read a misspelled attribute (`txtaimg_unclip`) and crashed with
    # AttributeError, and later referenced a never-bound `imgaimg` name.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
279
1
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


lowerCAmelCase_ = logging.get_logger(__name__)

# Expected filename of the SentencePiece model inside a checkpoint directory.
lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''}

lowerCAmelCase_ = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}

lowerCAmelCase_ = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}

# Segments (not really needed)
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
lowerCAmelCase_ = 3
lowerCAmelCase_ = 4


class __lowerCAmelCase ( _a ):
    '''SentencePiece-based XLNet tokenizer.  XLNet pads on the left and appends
    its special tokens (<sep>, <cls>) at the END of the sequence.'''

    lowerCamelCase_ : int = VOCAB_FILES_NAMES
    lowerCamelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase_ : str = '''left'''

    def __init__(self , __magic_name__ , __magic_name__=False , __magic_name__=True , __magic_name__=False , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="<unk>" , __magic_name__="<sep>" , __magic_name__="<pad>" , __magic_name__="<cls>" , __magic_name__="<mask>" , __magic_name__=["<eop>", "<eod>"] , __magic_name__ = None , **__magic_name__ , ) -> None:
        '''Load the SentencePiece model from `vocab_file` and register the
        XLNet special tokens; `sp_model_kwargs` are forwarded to
        SentencePieceProcessor.'''
        # Mask token behaves like a normal word, i.e. includes the space before it.
        snake_case_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token

        snake_case_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=__magic_name__ ,
            remove_space=__magic_name__ ,
            keep_accents=__magic_name__ ,
            bos_token=__magic_name__ ,
            eos_token=__magic_name__ ,
            unk_token=__magic_name__ ,
            sep_token=__magic_name__ ,
            pad_token=__magic_name__ ,
            cls_token=__magic_name__ ,
            mask_token=__magic_name__ ,
            additional_special_tokens=__magic_name__ ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **__magic_name__ ,
        )

        # XLNet uses 3 special token type ids (pad segment is id 3/4 below).
        snake_case_ : Any = 3
        snake_case_ : List[str] = do_lower_case
        snake_case_ : str = remove_space
        snake_case_ : Dict = keep_accents
        snake_case_ : Any = vocab_file

        snake_case_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__magic_name__ )

    @property
    def lowerCamelCase (self ) -> Optional[int]:
        '''Vocabulary size, i.e. number of pieces in the SentencePiece model.'''
        return len(self.sp_model )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''Return the full token -> id mapping, including added tokens.'''
        snake_case_ : Dict = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ) -> List[Any]:
        # The SentencePieceProcessor is not picklable; drop it and reload in
        # __setstate__ from self.vocab_file.
        snake_case_ : str = self.__dict__.copy()
        snake_case_ : str = None
        return state

    def __setstate__(self , __magic_name__ ) -> int:
        snake_case_ : Dict = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            snake_case_ : Optional[int] = {}

        snake_case_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowerCamelCase (self , __magic_name__ ) -> Tuple:
        '''Normalize raw text before SentencePiece: optional whitespace
        collapsing, quote normalization, accent stripping, lower-casing.'''
        if self.remove_space:
            snake_case_ : str = ''' '''.join(inputs.strip().split() )
        else:
            snake_case_ : List[Any] = inputs
        snake_case_ : Tuple = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )

        if not self.keep_accents:
            # NFKD split + dropping combining marks removes accents.
            snake_case_ : Any = unicodedata.normalize('''NFKD''' , __magic_name__ )
            snake_case_ : str = ''''''.join([c for c in outputs if not unicodedata.combining(__magic_name__ )] )
        if self.do_lower_case:
            snake_case_ : List[Any] = outputs.lower()

        return outputs

    def lowerCamelCase (self , __magic_name__ ) -> List[str]:
        '''Tokenize text into SentencePiece pieces, with special handling for
        pieces that end in "digit," so numbers split consistently.'''
        snake_case_ : Dict = self.preprocess_text(__magic_name__ )
        snake_case_ : str = self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
        snake_case_ : Any = []
        for piece in pieces:
            if len(__magic_name__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                # NOTE(review): the replace() argument here looks like it should
                # be SPIECE_UNDERLINE (strip the word-boundary marker before
                # re-encoding), not the method's text argument — TODO confirm.
                snake_case_ : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__magic_name__ , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        snake_case_ : str = cur_pieces[1:]
                    else:
                        snake_case_ : Optional[int] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(__magic_name__ )
            else:
                new_pieces.append(__magic_name__ )

        return new_pieces

    def lowerCamelCase (self , __magic_name__ ) -> Any:
        '''Convert a token (piece) into its id via the SentencePiece model.'''
        return self.sp_model.PieceToId(__magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
        '''Convert an id into its token (piece) via the SentencePiece model.'''
        return self.sp_model.IdToPiece(__magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> List[str]:
        '''Join pieces back into a string.
        NOTE(review): the first replace() argument looks like it should be
        SPIECE_UNDERLINE (the "▁" marker) rather than the token list argument,
        which str.replace cannot accept — TODO confirm against upstream.'''
        snake_case_ : List[str] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
        return out_string

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = False , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> str:
        '''Decode ids to text, keeping added tokens and SentencePiece pieces in
        separate sub-strings so byte-level and unicode text do not mix.'''
        snake_case_ : Optional[Any] = kwargs.pop('''use_source_tokenizer''' , __magic_name__ )

        snake_case_ : Dict = self.convert_ids_to_tokens(__magic_name__ , skip_special_tokens=__magic_name__ )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        snake_case_ : int = []
        snake_case_ : str = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(__magic_name__ ) )
                    snake_case_ : List[Any] = []
                sub_texts.append(__magic_name__ )
            else:
                current_sub_text.append(__magic_name__ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(__magic_name__ ) )

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        snake_case_ : Optional[int] = ''''''.join(__magic_name__ )

        snake_case_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            snake_case_ : Dict = self.clean_up_tokenization(__magic_name__ )
            return clean_text
        else:
            return text

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
        '''Build model inputs with XLNet's special-token layout:
        single: A <sep> <cls>;  pair: A <sep> B <sep> <cls>.'''
        snake_case_ : Tuple = [self.sep_token_id]
        snake_case_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ) -> List[int]:
        '''Mask marking special tokens (1) vs sequence tokens (0); special
        tokens sit at the end for XLNet.'''
        if already_has_special_tokens:
            # NOTE(review): the keyword `token_ids_a=` is repeated below, which
            # is a SyntaxError; the second one presumably should be
            # `token_ids_1=` — TODO confirm against upstream.
            return super().get_special_tokens_mask(
                token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )

        if token_ids_a is not None:
            return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1, 1]
        return ([0] * len(__magic_name__ )) + [1, 1]

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
        '''Token type ids: segment 0 for A, 1 for B, and 2 for the final <cls>.'''
        snake_case_ : List[Any] = [self.sep_token_id]
        snake_case_ : Tuple = [2]

        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
        '''Save the SentencePiece model file into `save_directory`, either by
        copying the original file or by serializing the in-memory model.'''
        if not os.path.isdir(__magic_name__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        snake_case_ : Tuple = os.path.join(
            __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __magic_name__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(__magic_name__ , '''wb''' ) as fi:
                snake_case_ : int = self.sp_model.serialized_model_proto()
                fi.write(__magic_name__ )

        return (out_vocab_file,)
279
"""Project Euler problem 74: digit-factorial chains.

Count how many starting numbers below a limit produce a chain of
digit-factorial sums with exactly ``chain_length`` non-repeating terms.
"""
from math import factorial

# Factorial of every decimal digit, keyed by the digit's string form so we
# can index directly into str(number) without converting back to int.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Return how many starting values below ``number_limit`` yield a chain
    with exactly ``chain_length`` non-repeating elements.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating
        # item or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Reuse the cached tail length when we stopped on a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
279
1
from ..utils import DummyObject, requires_backends


# NOTE(review): the original distinct class names were lost in an automated
# rename; the placeholders below currently share one name, so only the last
# binding survives at import time. Restore the real public names from the
# package __init__ when they are known.
class __lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder that raises unless torch, transformers and onnx are installed."""

    # Read by DummyObject to report which backends are missing.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # conventional DummyObject hook; original method name lost — TODO confirm
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # conventional DummyObject hook; original method name lost — TODO confirm
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder that raises unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder that raises unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
# NOTE(review): as above, the original distinct class names were lost; restore
# the real public names from the package __init__ when they are known.
class __lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder that raises unless torch, transformers and onnx are installed."""

    # Read by DummyObject to report which backends are missing.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # conventional DummyObject hook; original method name lost — TODO confirm
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # conventional DummyObject hook; original method name lost — TODO confirm
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder that raises unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class __lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder that raises unless torch, transformers and onnx are installed."""

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
279
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a single compressed file as if it were a one-file archive."""

    root_marker = ""
    protocol: str = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self,
        fo: str = "",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        **kwargs,
    ):
        """Open ``fo`` lazily through fsspec with this class's compression codec.

        Args:
            fo: URL/path of the compressed file, possibly chained with ``::``.
            target_protocol: protocol of the underlying (wrapped) filesystem.
            target_options: extra options forwarded to the underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # Drop the compression extension to expose the inner file's name.
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Lazily build the one-entry listing for the single uncompressed file.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        # The archive holds exactly one file, so the path argument is ignored.
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Proxy that forwards everything to the wrapped file object."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
279
1
class Things:
    """A named item with a value and a weight, for the greedy knapsack demo."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # value density: value per unit of weight
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of ``Things`` from parallel name/value/weight sequences."""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily pick items in descending ``key_func`` order while the total
    weight stays within ``max_cost``.

    Returns:
        A tuple ``(chosen_items, total_value)``.
    """
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Placeholder for doctest examples (the original examples were lost in a
    bad refactor — TODO restore them from upstream)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): original archive-map variable name lost in a rename; the
# conventional name is used here — confirm against the package __init__.
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    """Configuration class for a Megatron-BERT model.

    Stores the hyper-parameters that define the model architecture; defaults
    mirror the values below.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
279
1
"""Project Euler problem 234: sum of semidivisible numbers up to a limit."""
import math


def prime_sieve(n: int) -> list:
    """Return all primes strictly below ``n`` using an odd-only sieve."""
    if n <= 2:
        # No primes below 2; this also guards the is_prime[2] write below.
        return []
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Mark multiples of every odd i; even composites are skipped entirely
    # because the collection loop below only inspects odd indices.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Return the sum of all semidivisible numbers not exceeding ``limit``."""
    # Primes up to sqrt(limit) plus a safety margin for the "next prime".
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
279
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

# Shared RNG so test inputs are reproducible across helpers.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float-valued nested list with the given 2-D ``shape``."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """Holds the hyper-parameters and input builders shared by the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # NOTE(review): the original dtypes were garbled by a rename; the
        # conventional contract is float64 in, float32 out — confirm upstream.
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
279
1
"""Convert a TensorFlow LXMERT checkpoint into a PyTorch model file."""
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build an LxmertForPreTraining from ``config_file``, load the TF weights,
    and save the resulting state dict to ``pytorch_dump_path``."""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
279
"""Prepare TFRecord shards from pre-tokenized samples of a Hub dataset."""
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    """Parse the command-line options for shard preparation."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a datasets-compatible mapper that tokenizes the ``text`` column."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize tokenized input_ids/attention_mask pairs into tf.train.Example bytes."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    """Tokenize, chunk and shard the dataset split, writing TFRecord files."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
279
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''', # See all Marian models at https://huggingface.co/models?filter=marian } class __lowerCAmelCase ( _a ): lowerCamelCase_ : List[Any] = '''marian''' lowerCamelCase_ : Optional[int] = ['''past_key_values'''] lowerCamelCase_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__(self , __magic_name__=5_8101 , __magic_name__=None , __magic_name__=1024 , __magic_name__=12 , __magic_name__=4096 , __magic_name__=16 , __magic_name__=12 , __magic_name__=4096 , __magic_name__=16 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__=True , __magic_name__="gelu" , __magic_name__=1024 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=5_8100 , __magic_name__=False , __magic_name__=5_8100 , __magic_name__=0 , __magic_name__=0 , __magic_name__=True , **__magic_name__ , ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = vocab_size snake_case_ : str = decoder_vocab_size or vocab_size snake_case_ : Union[str, Any] = max_position_embeddings snake_case_ : str = d_model snake_case_ : Tuple = encoder_ffn_dim snake_case_ : List[Any] = encoder_layers snake_case_ : List[Any] = encoder_attention_heads snake_case_ : List[Any] = decoder_ffn_dim snake_case_ : List[str] = decoder_layers snake_case_ : List[Any] = decoder_attention_heads snake_case_ : str = dropout snake_case_ : int = attention_dropout 
snake_case_ : Dict = activation_dropout snake_case_ : Union[str, Any] = activation_function snake_case_ : Union[str, Any] = init_std snake_case_ : List[Any] = encoder_layerdrop snake_case_ : Optional[Any] = decoder_layerdrop snake_case_ : Optional[int] = use_cache snake_case_ : Union[str, Any] = encoder_layers snake_case_ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True snake_case_ : Union[str, Any] = share_encoder_decoder_embeddings super().__init__( pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , ) class __lowerCAmelCase ( _a ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: snake_case_ : str = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: snake_case_ : Optional[Any] = {0: '''batch'''} snake_case_ : str = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: snake_case_ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} snake_case_ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
snake_case_ : Any = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: snake_case_ , snake_case_ : Any = self.num_layers for i in range(__magic_name__ ): snake_case_ : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''} snake_case_ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''} else: snake_case_ : Optional[Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: snake_case_ : int = super().outputs else: snake_case_ : Tuple = super(__magic_name__ , self ).outputs if self.use_past: snake_case_ , snake_case_ : Dict = self.num_layers for i in range(__magic_name__ ): snake_case_ : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''} snake_case_ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]: '''simple docstring''' snake_case_ : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # Generate decoder inputs snake_case_ : Optional[Any] = seq_length if not self.use_past else 1 snake_case_ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) snake_case_ : 
List[str] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} snake_case_ : Optional[Any] = dict(**__magic_name__ , **__magic_name__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch snake_case_ , snake_case_ : Optional[int] = common_inputs['''input_ids'''].shape snake_case_ : List[Any] = common_inputs['''decoder_input_ids'''].shape[1] snake_case_ , snake_case_ : List[str] = self.num_attention_heads snake_case_ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case_ : List[Any] = decoder_seq_length + 3 snake_case_ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) snake_case_ : Dict = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 ) snake_case_ : Dict = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered snake_case_ , snake_case_ : Dict = self.num_layers snake_case_ : Optional[Any] = min(__magic_name__ , __magic_name__ ) snake_case_ : Dict = max(__magic_name__ , __magic_name__ ) - min_num_layers snake_case_ : Optional[Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__magic_name__ ): common_inputs["past_key_values"].append( ( torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), ) ) # TODO: test this. 
snake_case_ : Optional[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__magic_name__ , __magic_name__ ): common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) ) return common_inputs def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]: '''simple docstring''' snake_case_ : int = self._generate_dummy_inputs_for_encoder_and_decoder( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch snake_case_ , snake_case_ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values snake_case_ : int = seqlen + 2 snake_case_ , snake_case_ : Optional[int] = self.num_layers snake_case_ , snake_case_ : Union[str, Any] = self.num_attention_heads snake_case_ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case_ : int = common_inputs['''attention_mask'''].dtype snake_case_ : Optional[int] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) snake_case_ : List[str] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ ) ] return common_inputs def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]: '''simple docstring''' snake_case_ : int = compute_effective_axis_dimension( __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX 
snake_case_ : str = tokenizer.num_special_tokens_to_add(__magic_name__ ) snake_case_ : Optional[int] = compute_effective_axis_dimension( __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ ) # Generate dummy inputs according to compute batch and sequence snake_case_ : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size snake_case_ : Optional[Any] = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) ) return common_inputs def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: snake_case_ : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) else: snake_case_ : Optional[Any] = self._generate_dummy_inputs_for_causal_lm( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) return common_inputs def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: snake_case_ : Dict = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) else: snake_case_ : Tuple = super(__magic_name__ , self )._flatten_past_key_values_( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @property def lowerCamelCase (self ) -> float: '''simple docstring''' return 1e-4
279
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Any = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) snake_case_ : List[Any] = VideoClassificationPipeline(model=__magic_name__ , image_processor=__magic_name__ , top_k=2 ) snake_case_ : str = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' for example in examples: snake_case_ : Union[str, Any] = video_classifier(__magic_name__ ) self.assertEqual( __magic_name__ , [ {'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )}, {'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )}, ] , ) @require_torch def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Any = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' snake_case_ : str = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) snake_case_ : int = pipeline( '''video-classification''' , model=__magic_name__ , feature_extractor=__magic_name__ , frame_sampling_rate=4 ) snake_case_ : List[str] = hf_hub_download(repo_id='''nateraw/video-demo''' , 
filename='''archery.mp4''' , repo_type='''dataset''' ) snake_case_ : Union[str, Any] = video_classifier(__magic_name__ , top_k=2 ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , ) snake_case_ : int = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' pass
279
1
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __lowerCAmelCase ( _a, _a, _a ): @register_to_config def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False , ) -> Optional[int]: '''simple docstring''' super().__init__() snake_case_ : Tuple = nn.Embedding(__magic_name__ , __magic_name__ ) snake_case_ : Union[str, Any] = nn.Embedding(__magic_name__ , __magic_name__ ) snake_case_ : List[str] = False snake_case_ : Union[str, Any] = nn.Dropout(p=__magic_name__ ) snake_case_ : Any = TaConfig( vocab_size=__magic_name__ , d_model=__magic_name__ , num_heads=__magic_name__ , d_kv=__magic_name__ , d_ff=__magic_name__ , dropout_rate=__magic_name__ , feed_forward_proj=__magic_name__ , is_decoder=__magic_name__ , is_encoder_decoder=__magic_name__ , ) snake_case_ : List[Any] = nn.ModuleList() for lyr_num in range(__magic_name__ ): snake_case_ : List[Any] = TaBlock(__magic_name__ ) self.encoders.append(__magic_name__ ) snake_case_ : List[str] = TaLayerNorm(__magic_name__ ) snake_case_ : int = nn.Dropout(p=__magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> int: '''simple docstring''' snake_case_ : Dict = self.token_embedder(__magic_name__ ) snake_case_ : int = encoder_input_tokens.shape[1] snake_case_ : Union[str, Any] = torch.arange(__magic_name__ , device=encoder_input_tokens.device ) x += self.position_encoding(__magic_name__ ) snake_case_ : Any = self.dropout_pre(__magic_name__ ) # inverted the attention mask snake_case_ : Dict = encoder_input_tokens.size() snake_case_ : Dict = self.get_extended_attention_mask(__magic_name__ , __magic_name__ ) for lyr in self.encoders: snake_case_ : 
Dict = lyr(__magic_name__ , __magic_name__ )[0] snake_case_ : Optional[int] = self.layer_norm(__magic_name__ ) return self.dropout_post(__magic_name__ ), encoder_inputs_mask
279
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return *number*'s multiplication table with *number_of_terms* rows.

    Each row has the form ``"<number> * <i> = <number * i>"`` for
    ``i`` in ``1..number_of_terms``; rows are joined with newlines.
    An empty string is returned when ``number_of_terms`` is 0.

    Args:
        number: the number whose table is printed.
        number_of_terms: how many rows to generate.
    """
    # The mangled original declared two parameters with the same name (a
    # SyntaxError) and referenced undefined globals; proper names restore
    # the behaviour the __main__ call below clearly expects.
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


# Backwards-compatible alias for the previous (mangled) public name.
lowerCamelCase_ = multiplication_table

if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
279
1
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken in decreasing value/weight ratio; the first item that
    no longer fits entirely is taken fractionally, then the loop stops.

    Args:
        value: profit of each item.
        weight: weight of each item (parallel to ``value``).
        capacity: maximum total weight that can be carried.

    Returns:
        ``(max_value, fractions)`` where ``fractions[i]`` is the fraction
        of item ``i`` taken (0, 1, or an in-between value for the split
        item).  Empty inputs yield ``(0, [])``.
    """
    index = list(range(len(weight)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy order: best value-per-weight first.  The mangled original
    # renamed the lambda's parameter but left ``ratio[i]`` in its body,
    # so the sort key referenced an undefined name.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Item fits whole: take all of it.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Item does not fit: take the fraction that fills the sack.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


# Backwards-compatible alias for the previous (mangled) public name.
lowerCamelCase_ = fractional_knapsack

if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowerCAmelCase_ = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" snake_case_ : List[str] = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(_UpperCamelCase , _UpperCamelCase ) lowerCAmelCase_ = { '''blocks''': '''layers''', 
'''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" snake_case_ : str = list(s_dict.keys() ) for key in keys: snake_case_ : Optional[int] = key for k, v in WHISPER_MAPPING.items(): if k in key: snake_case_ : List[str] = new_key.replace(_UpperCamelCase , _UpperCamelCase ) print(f'''{key} -> {new_key}''' ) snake_case_ : Tuple = s_dict.pop(_UpperCamelCase ) return s_dict def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" snake_case_ , snake_case_ : Dict = emb.weight.shape snake_case_ : Tuple = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase ) snake_case_ : Any = emb.weight.data return lin_layer def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> bytes: """simple docstring""" os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) snake_case_ : List[Any] = os.path.basename(_UpperCamelCase ) snake_case_ : Any = url.split('''/''' )[-2] snake_case_ : str = os.path.join(_UpperCamelCase , _UpperCamelCase ) if os.path.exists(_UpperCamelCase ) and not os.path.isfile(_UpperCamelCase ): raise RuntimeError(f'''{download_target} exists and is 
not a regular file''' ) if os.path.isfile(_UpperCamelCase ): snake_case_ : Union[str, Any] = open(_UpperCamelCase , '''rb''' ).read() if hashlib.shaaaa(_UpperCamelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' ) with urllib.request.urlopen(_UpperCamelCase ) as source, open(_UpperCamelCase , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=_UpperCamelCase , unit_divisor=1_024 ) as loop: while True: snake_case_ : Dict = source.read(8_192 ) if not buffer: break output.write(_UpperCamelCase ) loop.update(len(_UpperCamelCase ) ) snake_case_ : Any = open(_UpperCamelCase , '''rb''' ).read() if hashlib.shaaaa(_UpperCamelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' ) return model_bytes def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple docstring""" if ".pt" not in checkpoint_path: snake_case_ : str = _download(_MODELS[checkpoint_path] ) else: snake_case_ : Union[str, Any] = torch.load(_UpperCamelCase , map_location='''cpu''' ) snake_case_ : int = original_checkpoint['''dims'''] snake_case_ : List[str] = original_checkpoint['''model_state_dict'''] snake_case_ : str = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(_UpperCamelCase ) rename_keys(_UpperCamelCase ) snake_case_ : Optional[int] = True snake_case_ : int = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] snake_case_ : List[str] = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=_UpperCamelCase , decoder_ffn_dim=_UpperCamelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , 
encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) snake_case_ : Union[str, Any] = WhisperForConditionalGeneration(_UpperCamelCase ) snake_case_ , snake_case_ : List[Any] = model.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase ) if len(_UpperCamelCase ) > 0 and not set(_UpperCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f''' but all the following weights are missing {missing}''' ) if tie_embeds: snake_case_ : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case_ : Any = proj_out_weights model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCAmelCase_ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
279
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public symbols it provides.
# The mangled original bound this dict to a throwaway name and then passed
# the undefined ``_import_structure`` to _LazyModule (a NameError); the
# conventional name is restored here.
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not register the modeling submodule.
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy
    # below defers them until first attribute access.
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy in place of this module so heavy submodules are
    # only imported on demand.  The mangled original assigned the proxy to a
    # local name, which never activated lazy loading.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
279
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase_ = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 
5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase_ = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase_ = ( 
('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase_ = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase_ ( ) -> Dict: """simple docstring""" snake_case_ , snake_case_ : Any = randrange(len(_UpperCamelCase ) ), randrange(len(_UpperCamelCase ) ) snake_case_ : Any = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] snake_case_ , snake_case_ : Tuple = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase_ ( _UpperCamelCase = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(_UpperCamelCase )) @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple docstring""" assert PokerHand(_UpperCamelCase )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" assert PokerHand(_UpperCamelCase )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: """simple docstring""" snake_case_ : str = PokerHand(_UpperCamelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple 
docstring""" assert PokerHand(_UpperCamelCase )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" assert PokerHand(_UpperCamelCase )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: """simple docstring""" assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Dict = [PokerHand(_UpperCamelCase ) for hand in SORTED_HANDS] snake_case_ : str = poker_hands.copy() shuffle(_UpperCamelCase ) snake_case_ : List[str] = chain(sorted(_UpperCamelCase ) ) for index, hand in enumerate(_UpperCamelCase ): assert hand == poker_hands[index] def lowerCamelCase_ ( ) -> Dict: """simple docstring""" snake_case_ : Union[str, Any] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=_UpperCamelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Dict = PokerHand('''2C 4S AS 3D 5C''' ) snake_case_ : str = True snake_case_ : Tuple = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase_ ( ) -> List[str]: """simple docstring""" snake_case_ : List[str] = 0 snake_case_ : Union[str, Any] = os.path.abspath(os.path.dirname(_UpperCamelCase ) ) snake_case_ : Dict = os.path.join(_UpperCamelCase , '''poker_hands.txt''' ) with 
open(_UpperCamelCase ) as file_hand: for line in file_hand: snake_case_ : Dict = line[:14].strip() snake_case_ : List[str] = line[15:].strip() snake_case_ , snake_case_ : str = PokerHand(_UpperCamelCase ), PokerHand(_UpperCamelCase ) snake_case_ : int = player.compare_with(_UpperCamelCase ) if output == "Win": answer += 1 assert answer == 376
279
1
import os


def solution(grid=None) -> int:
    """Project Euler 11: greatest product of four adjacent numbers in a grid.

    Adjacency is checked in all four directions: horizontally, vertically,
    and along both diagonals.

    Args:
        grid: optional square grid (list of equal-length rows of ints).
            When omitted, a whitespace-separated 20x20 grid is read from
            ``grid.txt`` next to this file (the original behaviour).

    Returns:
        The maximum product of four adjacent grid values.

    Fixes over the original:
    - ``os.path.dirname(_UpperCamelCase)`` referenced an undefined name; the
      path is now derived from ``__file__``.
    - The ``-> Dict`` annotation raised NameError at definition time (``Dict``
      was never imported) and was wrong anyway — the function returns an int.
    - The rows list was bound to one name but appended through another
      (``l.append`` after assigning ``snake_case_``); a comprehension avoids
      the mutable accumulator entirely.
    - The ``__main__`` guard called ``solution()`` which was never defined;
      the function now carries that name (with a backward-compatible alias).
    """
    if grid is None:
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "grid.txt")
        with open(path) as f:
            grid = [[int(x) for x in f.readline().split()] for _ in range(20)]

    n = len(grid)
    maximum = 0
    # Horizontal runs of four.
    for i in range(n):
        for j in range(n - 3):
            maximum = max(maximum, grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3])
    # Vertical runs of four.
    for i in range(n - 3):
        for j in range(n):
            maximum = max(maximum, grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j])
    # Down-right diagonals.
    for i in range(n - 3):
        for j in range(n - 3):
            maximum = max(
                maximum, grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            )
    # Down-left diagonals.
    for i in range(n - 3):
        for j in range(3, n):
            maximum = max(
                maximum, grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            )
    return maximum


# Backward-compatible alias for the original (mangled) function name.
lowerCamelCase_ = solution


if __name__ == "__main__":
    print(solution())
279
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available

# NOTE(review): identifiers in this module appear machine-mangled — every class
# is named ``__lowerCAmelCase``, fields/locals are bound as ``lowerCamelCase_``
# / ``snake_case_`` while later statements read the original names (``tokens``,
# ``label_ids``, ``label_map``, ``self.features`` ...). The code cannot execute
# as written; comments below document the evident intent (an NER
# token-classification utility module) without changing any non-comment token.
lowerCAmelCase_ = logging.getLogger(__name__)


@dataclass
class __lowerCAmelCase :
    # One labelled example: a guid-like id, the example's words, and
    # (optionally) one label per word. Referred to elsewhere as ``InputExample``.
    lowerCamelCase_ : str
    lowerCamelCase_ : List[str]
    lowerCamelCase_ : Optional[List[str]]


@dataclass
class __lowerCAmelCase :
    # Model-ready features: input ids, attention mask, optional token type ids
    # and label ids. Referred to elsewhere as ``InputFeatures``.
    lowerCamelCase_ : List[int]
    lowerCamelCase_ : List[int]
    lowerCamelCase_ : Optional[List[int]] = None
    lowerCamelCase_ : Optional[List[int]] = None


class __lowerCAmelCase ( _a ):
    # Dataset split names. Base ``_a`` is an unresolved mangled name —
    # presumably ``Enum`` given the import above; TODO confirm.
    lowerCamelCase_ : str = '''train'''
    lowerCamelCase_ : List[str] = '''dev'''
    lowerCamelCase_ : List[Any] = '''test'''


class __lowerCAmelCase :
    # Task interface: read examples/labels and convert them to features.

    @staticmethod
    def lowerCamelCase (__magic_name__ , __magic_name__ ) -> List[InputExample]:
        '''Read the examples of one split from a data directory (abstract).'''
        raise NotImplementedError

    @staticmethod
    def lowerCamelCase (__magic_name__ ) -> List[str]:
        '''Return the label set for the task (abstract).'''
        raise NotImplementedError

    @staticmethod
    def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__=1 , __magic_name__="[SEP]" , __magic_name__=False , __magic_name__=False , __magic_name__=0 , __magic_name__=0 , __magic_name__=-100 , __magic_name__=0 , __magic_name__=True , ) -> List[InputFeatures]:
        '''Tokenize each example, align labels to subword tokens, add special
        tokens, and pad/truncate everything to a fixed sequence length.'''
        snake_case_ : Optional[int] = {label: i for i, label in enumerate(__magic_name__ )}  # label -> id
        snake_case_ : Dict = []  # accumulated InputFeatures
        for ex_index, example in enumerate(__magic_name__ ):
            if ex_index % 1_0000 == 0:
                logger.info('''Writing example %d of %d''' , __magic_name__ , len(__magic_name__ ) )
            snake_case_ : List[str] = []
            snake_case_ : List[str] = []
            for word, label in zip(example.words , example.labels ):
                snake_case_ : Optional[Any] = tokenizer.tokenize(__magic_name__ )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(__magic_name__ ) > 0:
                    tokens.extend(__magic_name__ )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__magic_name__ ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            snake_case_ : Union[str, Any] = tokenizer.num_special_tokens_to_add()
            if len(__magic_name__ ) > max_seq_length - special_tokens_count:
                snake_case_ : str = tokens[: (max_seq_length - special_tokens_count)]
                snake_case_ : Any = label_ids[: (max_seq_length - special_tokens_count)]
            # BERT convention: single sequences look like "[CLS] the dog is hairy . [SEP]"
            # with all segment ("type") ids 0; for pairs the second sequence gets
            # type id 1. The [CLS] vector serves as the sentence representation
            # for classification, which only works because the model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            snake_case_ : Union[str, Any] = [sequence_a_segment_id] * len(__magic_name__ )
            if cls_token_at_end:
                # xlnet-style: [CLS] goes at the end of the sequence.
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                snake_case_ : Union[str, Any] = [cls_token] + tokens
                snake_case_ : List[Any] = [pad_token_label_id] + label_ids
                snake_case_ : Optional[Any] = [cls_token_segment_id] + segment_ids
            snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            snake_case_ : int = [1 if mask_padding_with_zero else 0] * len(__magic_name__ )
            # Zero-pad up to the sequence length.
            snake_case_ : Optional[int] = max_seq_length - len(__magic_name__ )
            if pad_on_left:
                snake_case_ : Optional[Any] = ([pad_token] * padding_length) + input_ids
                snake_case_ : Optional[int] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                snake_case_ : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids
                snake_case_ : Dict = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            # All four parallel sequences must now be exactly max_seq_length long.
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            if ex_index < 5:
                # Log the first few converted examples for manual inspection.
                logger.info('''*** Example ***''' )
                logger.info('''guid: %s''' , example.guid )
                logger.info('''tokens: %s''' , ''' '''.join([str(__magic_name__ ) for x in tokens] ) )
                logger.info('''input_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_ids] ) )
                logger.info('''input_mask: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_mask] ) )
                logger.info('''segment_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in segment_ids] ) )
                logger.info('''label_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                # Tokenizer does not use segment ids (e.g. roberta/distilbert).
                snake_case_ : int = None
            features.append(
                InputFeatures(
                    input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , label_ids=__magic_name__ ) )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class __lowerCAmelCase ( _a ):
        # PyTorch dataset over cached InputFeatures. Base ``_a`` is presumably
        # ``Dataset`` (imported above); TODO confirm.
        lowerCamelCase_ : List[InputFeatures]
        # Label id ignored by the loss: CrossEntropyLoss().ignore_index == -100.
        lowerCamelCase_ : int = nn.CrossEntropyLoss().ignore_index

        def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Union[str, Any]:
            '''Load features from a per-split cache file, or build and cache them.'''
            snake_case_ : List[str] = os.path.join(
                __magic_name__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__magic_name__ ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            snake_case_ : Dict = cached_features_file + '''.lock'''
            with FileLock(__magic_name__ ):
                if os.path.exists(__magic_name__ ) and not overwrite_cache:
                    logger.info(F'''Loading features from cached file {cached_features_file}''' )
                    snake_case_ : Dict = torch.load(__magic_name__ )
                else:
                    logger.info(F'''Creating features from dataset file at {data_dir}''' )
                    snake_case_ : Any = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    snake_case_ : int = token_classification_task.convert_examples_to_features(
                        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(F'''Saving features into cached file {cached_features_file}''' )
                    torch.save(self.features , __magic_name__ )

        def __len__(self ) -> Optional[Any]:
            '''Number of cached feature records.'''
            return len(self.features )

        def __getitem__(self , __magic_name__ ) -> InputFeatures:
            '''Return one feature record (body reads the mangled index ``i``).'''
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class __lowerCAmelCase :
        # TensorFlow counterpart: wraps the features in a generator-backed
        # tf.data.Dataset instead of a torch Dataset.
        lowerCamelCase_ : List[InputFeatures]
        lowerCamelCase_ : int = -100  # loss-ignore label id

        def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Optional[int]:
            '''Build features and the tf.data.Dataset that yields them.'''
            snake_case_ : Optional[int] = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ )
            # TODO clean up all this to leverage built-in features of tokenizers
            snake_case_ : int = token_classification_task.convert_examples_to_features(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )

            def gen():
                # Yield (inputs, labels) pairs; include token_type_ids only
                # when the features carry them.
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                snake_case_ : Optional[Any] = tf.data.Dataset.from_generator(
                    __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
                        {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) , )
            else:
                snake_case_ : int = tf.data.Dataset.from_generator(
                    __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
                        {
                            '''input_ids''': tf.TensorShape([None] ),
                            '''attention_mask''': tf.TensorShape([None] ),
                            '''token_type_ids''': tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) , )

        def lowerCamelCase (self ) -> List[Any]:
            '''Attach a cardinality assertion and return the dataset.'''
            snake_case_ : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset

        def __len__(self ) -> str:
            '''Number of feature records.'''
            return len(self.features )

        def __getitem__(self , __magic_name__ ) -> InputFeatures:
            '''Return one feature record (body reads the mangled index ``i``).'''
            return self.features[i]
279
1
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available

# NOTE(review): this chunk is a byte-identical duplicate of the NER
# token-classification utility module that appears earlier in this file —
# consider deduplicating. Identifiers are machine-mangled (classes all named
# ``__lowerCAmelCase``, locals bound as ``snake_case_`` but read under their
# original names), so the code cannot execute as written; the comments below
# document the evident intent without changing any non-comment token.
lowerCAmelCase_ = logging.getLogger(__name__)


@dataclass
class __lowerCAmelCase :
    # One labelled example: a guid-like id, the example's words, and
    # (optionally) one label per word. Referred to elsewhere as ``InputExample``.
    lowerCamelCase_ : str
    lowerCamelCase_ : List[str]
    lowerCamelCase_ : Optional[List[str]]


@dataclass
class __lowerCAmelCase :
    # Model-ready features: input ids, attention mask, optional token type ids
    # and label ids. Referred to elsewhere as ``InputFeatures``.
    lowerCamelCase_ : List[int]
    lowerCamelCase_ : List[int]
    lowerCamelCase_ : Optional[List[int]] = None
    lowerCamelCase_ : Optional[List[int]] = None


class __lowerCAmelCase ( _a ):
    # Dataset split names. Base ``_a`` is an unresolved mangled name —
    # presumably ``Enum`` given the import above; TODO confirm.
    lowerCamelCase_ : str = '''train'''
    lowerCamelCase_ : List[str] = '''dev'''
    lowerCamelCase_ : List[Any] = '''test'''


class __lowerCAmelCase :
    # Task interface: read examples/labels and convert them to features.

    @staticmethod
    def lowerCamelCase (__magic_name__ , __magic_name__ ) -> List[InputExample]:
        '''Read the examples of one split from a data directory (abstract).'''
        raise NotImplementedError

    @staticmethod
    def lowerCamelCase (__magic_name__ ) -> List[str]:
        '''Return the label set for the task (abstract).'''
        raise NotImplementedError

    @staticmethod
    def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__=1 , __magic_name__="[SEP]" , __magic_name__=False , __magic_name__=False , __magic_name__=0 , __magic_name__=0 , __magic_name__=-100 , __magic_name__=0 , __magic_name__=True , ) -> List[InputFeatures]:
        '''Tokenize each example, align labels to subword tokens, add special
        tokens, and pad/truncate everything to a fixed sequence length.'''
        snake_case_ : Optional[int] = {label: i for i, label in enumerate(__magic_name__ )}  # label -> id
        snake_case_ : Dict = []  # accumulated InputFeatures
        for ex_index, example in enumerate(__magic_name__ ):
            if ex_index % 1_0000 == 0:
                logger.info('''Writing example %d of %d''' , __magic_name__ , len(__magic_name__ ) )
            snake_case_ : List[str] = []
            snake_case_ : List[str] = []
            for word, label in zip(example.words , example.labels ):
                snake_case_ : Optional[Any] = tokenizer.tokenize(__magic_name__ )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(__magic_name__ ) > 0:
                    tokens.extend(__magic_name__ )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__magic_name__ ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            snake_case_ : Union[str, Any] = tokenizer.num_special_tokens_to_add()
            if len(__magic_name__ ) > max_seq_length - special_tokens_count:
                snake_case_ : str = tokens[: (max_seq_length - special_tokens_count)]
                snake_case_ : Any = label_ids[: (max_seq_length - special_tokens_count)]
            # BERT convention: single sequences look like "[CLS] the dog is hairy . [SEP]"
            # with all segment ("type") ids 0; for pairs the second sequence gets
            # type id 1. The [CLS] vector serves as the sentence representation
            # for classification, which only works because the model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            snake_case_ : Union[str, Any] = [sequence_a_segment_id] * len(__magic_name__ )
            if cls_token_at_end:
                # xlnet-style: [CLS] goes at the end of the sequence.
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                snake_case_ : Union[str, Any] = [cls_token] + tokens
                snake_case_ : List[Any] = [pad_token_label_id] + label_ids
                snake_case_ : Optional[Any] = [cls_token_segment_id] + segment_ids
            snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            snake_case_ : int = [1 if mask_padding_with_zero else 0] * len(__magic_name__ )
            # Zero-pad up to the sequence length.
            snake_case_ : Optional[int] = max_seq_length - len(__magic_name__ )
            if pad_on_left:
                snake_case_ : Optional[Any] = ([pad_token] * padding_length) + input_ids
                snake_case_ : Optional[int] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                snake_case_ : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids
                snake_case_ : Dict = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            # All four parallel sequences must now be exactly max_seq_length long.
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            assert len(__magic_name__ ) == max_seq_length
            if ex_index < 5:
                # Log the first few converted examples for manual inspection.
                logger.info('''*** Example ***''' )
                logger.info('''guid: %s''' , example.guid )
                logger.info('''tokens: %s''' , ''' '''.join([str(__magic_name__ ) for x in tokens] ) )
                logger.info('''input_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_ids] ) )
                logger.info('''input_mask: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_mask] ) )
                logger.info('''segment_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in segment_ids] ) )
                logger.info('''label_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                # Tokenizer does not use segment ids (e.g. roberta/distilbert).
                snake_case_ : int = None
            features.append(
                InputFeatures(
                    input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , label_ids=__magic_name__ ) )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class __lowerCAmelCase ( _a ):
        # PyTorch dataset over cached InputFeatures. Base ``_a`` is presumably
        # ``Dataset`` (imported above); TODO confirm.
        lowerCamelCase_ : List[InputFeatures]
        # Label id ignored by the loss: CrossEntropyLoss().ignore_index == -100.
        lowerCamelCase_ : int = nn.CrossEntropyLoss().ignore_index

        def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Union[str, Any]:
            '''Load features from a per-split cache file, or build and cache them.'''
            snake_case_ : List[str] = os.path.join(
                __magic_name__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__magic_name__ ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            snake_case_ : Dict = cached_features_file + '''.lock'''
            with FileLock(__magic_name__ ):
                if os.path.exists(__magic_name__ ) and not overwrite_cache:
                    logger.info(F'''Loading features from cached file {cached_features_file}''' )
                    snake_case_ : Dict = torch.load(__magic_name__ )
                else:
                    logger.info(F'''Creating features from dataset file at {data_dir}''' )
                    snake_case_ : Any = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    snake_case_ : int = token_classification_task.convert_examples_to_features(
                        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(F'''Saving features into cached file {cached_features_file}''' )
                    torch.save(self.features , __magic_name__ )

        def __len__(self ) -> Optional[Any]:
            '''Number of cached feature records.'''
            return len(self.features )

        def __getitem__(self , __magic_name__ ) -> InputFeatures:
            '''Return one feature record (body reads the mangled index ``i``).'''
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class __lowerCAmelCase :
        # TensorFlow counterpart: wraps the features in a generator-backed
        # tf.data.Dataset instead of a torch Dataset.
        lowerCamelCase_ : List[InputFeatures]
        lowerCamelCase_ : int = -100  # loss-ignore label id

        def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Optional[int]:
            '''Build features and the tf.data.Dataset that yields them.'''
            snake_case_ : Optional[int] = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ )
            # TODO clean up all this to leverage built-in features of tokenizers
            snake_case_ : int = token_classification_task.convert_examples_to_features(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )

            def gen():
                # Yield (inputs, labels) pairs; include token_type_ids only
                # when the features carry them.
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                snake_case_ : Optional[Any] = tf.data.Dataset.from_generator(
                    __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
                        {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) , )
            else:
                snake_case_ : int = tf.data.Dataset.from_generator(
                    __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
                        {
                            '''input_ids''': tf.TensorShape([None] ),
                            '''attention_mask''': tf.TensorShape([None] ),
                            '''token_type_ids''': tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) , )

        def lowerCamelCase (self ) -> List[Any]:
            '''Attach a cardinality assertion and return the dataset.'''
            snake_case_ : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset

        def __len__(self ) -> str:
            '''Number of feature records.'''
            return len(self.features )

        def __getitem__(self , __magic_name__ ) -> InputFeatures:
            '''Return one feature record (body reads the mangled index ``i``).'''
            return self.features[i]
279
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[int] = SpeechTaTokenizer lowerCamelCase_ : int = False lowerCamelCase_ : Dict = True def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case_ : Tuple = SpeechTaTokenizer(__magic_name__ ) snake_case_ : Any = AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ ) snake_case_ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Dict = '''this is a test''' snake_case_ : int = '''this is a test''' return input_text, output_text def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> List[Any]: '''simple docstring''' snake_case_ , snake_case_ : int = self.get_input_output_texts(__magic_name__ ) snake_case_ : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) snake_case_ : Any = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) return text, ids def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[str] = '''<pad>''' snake_case_ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) 
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__magic_name__ ) , 81 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : int = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) ) snake_case_ : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': 
'''<<<<<|||>|>>>>|>'''} snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Dict = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) ) snake_case_ : Tuple = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> List[str]: '''simple docstring''' pass def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.get_tokenizer() snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, 
'''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ ) # fmt: off self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off snake_case_ : List[Any] = { '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 
14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
279
1
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class GPTNeoXModelTester:
    """Builds tiny GPT-NeoX configs/inputs and runs per-head shape checks.

    NOTE(review): the mangled original named every parameter ``__magic_name__``
    (duplicate parameter names are a SyntaxError) and every local
    ``snake_case_``; names here are restored from the call sites visible in
    this file (``self.model_tester.create_and_check_model`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Last vocab id doubles as the padding id for these tiny test configs.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        """Build the tiny GPTNeoXConfig used by every check in this tester."""
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs but with the decoder flag set."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        # Run once with and once without the attention mask; only the latter's
        # output shape is asserted.
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # Causal LM uses the inputs themselves as labels (next-token objective).
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """Check that cached decoding (past_key_values) matches full recomputation."""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Common-test switches disabled for this architecture.
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
279
def _print_dist(dist, v):
    """Pretty-print a v x v shortest-path matrix; unreachable pairs show as INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via Floyd-Warshall in O(v^3).

    :param graph: v x v adjacency matrix of edge weights; ``float('inf')``
        marks a missing edge. The input matrix is not modified.
    :param v: number of vertices.
    :return: tuple ``(dist, v)`` where ``dist[i][j]`` is the length of the
        shortest path from i to j. Also prints the matrix as a side effect.
    """
    # Work on a copy so the caller's graph is left untouched.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    # Distance from a vertex to itself is zero.
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2

    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]

    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2

    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1

    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
279
1
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    """Tests for ``Dataset.from_list``.

    NOTE(review): the mangled original named the base class ``_a`` (undefined
    here; ``TestCase`` is the only imported candidate), gave every method the
    same name and every local the name ``snake_case_`` while referencing
    ``dset``/``example_records`` — all restored from the visible references.
    """

    def _create_example_records(self):
        # Canonical two-column fixture shared by several tests below.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        # Column-oriented equivalent of _create_example_records.
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        # Every row must round-trip unchanged.
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        # from_list and from_dict must produce datasets with identical info.
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        # An empty list yields an empty, column-less dataset.
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
279
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    # Stub model whose forward signature has all tokenizer outputs contiguous;
    # ensure_valid_input inspects this signature, so the method MUST be
    # named `forward` with these parameter names.
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    # Stub model with an extra parameter interleaved between tokenizer
    # outputs (like GPT2's `past`).
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    """Tests for transformers.convert_graph_to_onnx (export, quantize, shape inference)."""

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX and return the output path; fail the test on error."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
279
1
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


# NOTE(review): the obfuscation reuses ``lowerCAmelCase_`` for every module constant
# (logger, SPIECE_UNDERLINE, vocab-file names, url maps, positional-embedding sizes),
# while the class body still refers to the original upper-case names — confirm against
# the un-obfuscated source before relying on these bindings.
lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = '''▁'''  # SentencePiece word-boundary marker

lowerCAmelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}

lowerCAmelCase_ = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}

lowerCAmelCase_ = {'''vinai/bartpho-syllable''': 1_0_2_4}


class __lowerCAmelCase ( _a ):
    '''BARTpho-style tokenizer: a SentencePiece model combined with a reduced fairseq vocabulary.'''

    lowerCamelCase_ : Any = VOCAB_FILES_NAMES
    lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase_ : Any = ['''input_ids''', '''attention_mask''']

    def __init__(self , __magic_name__ , __magic_name__ , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="</s>" , __magic_name__="<s>" , __magic_name__="<unk>" , __magic_name__="<pad>" , __magic_name__="<mask>" , __magic_name__ = None , **__magic_name__ , ) -> None:
        '''Load the SentencePiece model and the reduced monolingual (fairseq) vocabulary.'''
        # Mask token behaves like a normal word: include preceding space via lstrip.
        snake_case_ : List[str] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token

        snake_case_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=__magic_name__ ,
            eos_token=__magic_name__ ,
            unk_token=__magic_name__ ,
            sep_token=__magic_name__ ,
            cls_token=__magic_name__ ,
            pad_token=__magic_name__ ,
            mask_token=__magic_name__ ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **__magic_name__ ,
        )

        snake_case_ : Union[str, Any] = vocab_file
        snake_case_ : List[str] = monolingual_vocab_file
        snake_case_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__magic_name__ ) )

        # Load the reduced vocab

        # Keep order of special tokens for backward compatibility
        snake_case_ : int = {}
        snake_case_ : Dict = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__magic_name__ ) not in self.fairseq_tokens_to_ids:
                snake_case_ : Optional[int] = cnt
                cnt += 1
        # First whitespace-separated column of each dict.txt line is the token.
        with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                snake_case_ : List[str] = line.strip().split()[0]
                snake_case_ : List[str] = len(self.fairseq_tokens_to_ids )
        if str(__magic_name__ ) not in self.fairseq_tokens_to_ids:
            snake_case_ : Tuple = len(self.fairseq_tokens_to_ids )

        snake_case_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self ) -> List[Any]:
        '''Drop the unpicklable SentencePiece processor; keep its serialized proto instead.'''
        snake_case_ : Any = self.__dict__.copy()
        snake_case_ : List[str] = None
        snake_case_ : List[str] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self , __magic_name__ ) -> Any:
        '''Restore state and rebuild the SentencePiece processor from the serialized proto.'''
        snake_case_ : int = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            snake_case_ : Dict = {}

        snake_case_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
        '''Add BART-style specials: ``<s> A </s>`` (pair: ``<s> A </s></s> B </s>``).'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        snake_case_ : Dict = [self.cls_token_id]
        snake_case_ : Optional[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ) -> List[int]:
        '''Return a mask with 1 at special-token positions and 0 elsewhere.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )

        if token_ids_a is None:
            return [1] + ([0] * len(__magic_name__ )) + [1]
        return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
        '''Token-type ids are all zero — this model does not use token types.'''
        snake_case_ : str = [self.sep_token_id]
        snake_case_ : Dict = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def lowerCamelCase (self ) -> Optional[Any]:
        '''Size of the reduced fairseq vocabulary (not the full SentencePiece model).'''
        return len(self.fairseq_ids_to_tokens )

    def lowerCamelCase (self ) -> List[str]:
        '''Return token->id mapping, including tokens added after loading.'''
        snake_case_ : Optional[int] = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowerCamelCase (self , __magic_name__ ) -> List[str]:
        '''Tokenize text into SentencePiece string pieces.'''
        return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> List[Any]:
        '''Map a token to its reduced-vocab id; unknown tokens map to the unk id.'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
        '''Map a reduced-vocab id back to its token string.'''
        return self.fairseq_ids_to_tokens[index]

    def lowerCamelCase (self , __magic_name__ ) -> int:
        '''Join pieces and convert the SentencePiece underline marker back to spaces.'''
        snake_case_ : Dict = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
        return out_string

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
        '''Write both the SentencePiece model and the monolingual dict into ``save_directory``.'''
        if not os.path.isdir(__magic_name__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        snake_case_ : int = os.path.join(
            __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case_ : List[str] = os.path.join(
            __magic_name__ ,
            (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ,
        )

        # Copy the on-disk SentencePiece model when possible, otherwise dump the in-memory proto.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __magic_name__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(__magic_name__ , '''wb''' ) as fi:
                snake_case_ : int = self.sp_model.serialized_model_proto()
                fi.write(__magic_name__ )

        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            __magic_name__ ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , __magic_name__ )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F'''{str(__magic_name__ )} \n''' )

        return out_vocab_file, out_monolingual_vocab_file
279
lowerCAmelCase_ = { "joule": 1.0, "kilojoule": 1_0_0_0, "megajoule": 1_0_0_0_0_0_0, "gigajoule": 1_0_0_0_0_0_0_0_0_0, "wattsecond": 1.0, "watthour": 3_6_0_0, "kilowatthour": 3_6_0_0_0_0_0, "newtonmeter": 1.0, "calorie_nutr": 4_1_8_6.8, "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0, "electronvolt": 1.602_176_634e-19, "britishthermalunit_it": 1_0_5_5.0_5_5_8_5, "footpound": 1.355_818, } def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: snake_case_ : str = ( f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' f'''Valid values are: {", ".join(_UpperCamelCase )}''' ) raise ValueError(_UpperCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
279
1
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=_a ) class __lowerCAmelCase ( _a ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization lowerCamelCase_ : str = field(default='''summarization''', metadata={'''include_in_asdict_even_if_is_default''': True} ) lowerCamelCase_ : ClassVar[Features] = Features({'''text''': Value('''string''' )} ) lowerCamelCase_ : ClassVar[Features] = Features({'''summary''': Value('''string''' )} ) lowerCamelCase_ : str = "text" lowerCamelCase_ : str = "summary" @property def lowerCamelCase (self ) -> Dict[str, str]: '''simple docstring''' return {self.text_column: "text", self.summary_column: "summary"}
279
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


lowerCAmelCase_ = datasets.logging.get_logger(__name__)

# NOTE(review): the obfuscation reuses ``lowerCAmelCase_`` for the citation,
# description, kwargs-description and checkpoint-url constants, which the class
# below reads via their original names (_CITATION, _DESCRIPTION,
# _KWARGS_DESCRIPTION, CHECKPOINT_URLS) — confirm against the un-obfuscated source.
lowerCAmelCase_ = '''\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
'''

lowerCAmelCase_ = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
'''

lowerCAmelCase_ = '''
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    \'scores\': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
'''

lowerCAmelCase_ = {
    '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
    '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
    '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
    '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
    '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
    '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
    '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
    '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
    '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
    '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    '''``datasets.Metric`` wrapper around the learned BLEURT scorer.'''

    def lowerCamelCase (self ) -> Optional[int]:
        '''Declare metric metadata and the string prediction/reference features.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage='''https://github.com/google-research/bleurt''' ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) ,
            codebase_urls=['''https://github.com/google-research/bleurt'''] ,
            reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] ,
        )

    def lowerCamelCase (self , __magic_name__ ) -> List[Any]:
        '''Resolve the configured checkpoint name, download/extract it and build the scorer.'''
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            snake_case_ : Dict = '''bleurt-base-128'''

        # Checkpoint names are accepted case-insensitively (lower for legacy, upper for BLEURT-20).
        if self.config_name.lower() in CHECKPOINT_URLS:
            snake_case_ : Optional[int] = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            snake_case_ : Union[str, Any] = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )

        # download the model checkpoint specified by self.config_name and set up the scorer
        snake_case_ : Any = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        snake_case_ : Dict = score.BleurtScorer(os.path.join(__magic_name__ , __magic_name__ ) )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
        '''Score each (reference, candidate) pair and return {"scores": [...]}.'''
        snake_case_ : Dict = self.scorer.score(references=__magic_name__ , candidates=__magic_name__ )
        return {"scores": scores}
279
1
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> Optional[int]:
    '''Build the model-inputs dict, deriving the attention mask from pad tokens when absent.'''
    # NOTE(review): the obfuscated parameters presumably stand for
    # (config, input_ids, decoder_input_ids, attention_mask) — confirm upstream.
    if attention_mask is None:
        snake_case_ : Tuple = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta )
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class __lowerCAmelCase :
    '''Builds tiny OPT configs/inputs and checks decoder past-key-value consistency.'''

    lowerCamelCase_ : Optional[Any] = OPTConfig
    lowerCamelCase_ : Dict = {}
    lowerCamelCase_ : Union[str, Any] = '''gelu'''

    def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=False , __magic_name__=99 , __magic_name__=16 , __magic_name__=2 , __magic_name__=4 , __magic_name__=4 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=20 , __magic_name__=2 , __magic_name__=1 , __magic_name__=0 , __magic_name__=16 , __magic_name__=16 , ) -> Tuple:
        '''Record the tiny-model hyperparameters used when building configs/inputs.'''
        snake_case_ : Optional[Any] = parent
        snake_case_ : Union[str, Any] = batch_size
        snake_case_ : Dict = seq_length
        snake_case_ : Union[str, Any] = is_training
        snake_case_ : Tuple = use_labels
        snake_case_ : List[str] = vocab_size
        snake_case_ : int = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : List[str] = num_attention_heads
        snake_case_ : Optional[Any] = intermediate_size
        snake_case_ : List[str] = hidden_act
        snake_case_ : int = hidden_dropout_prob
        snake_case_ : Optional[int] = attention_probs_dropout_prob
        snake_case_ : Optional[int] = max_position_embeddings
        snake_case_ : List[str] = eos_token_id
        snake_case_ : Dict = pad_token_id
        snake_case_ : List[Any] = bos_token_id
        snake_case_ : Optional[Any] = embed_dim
        snake_case_ : Dict = word_embed_proj_dim
        snake_case_ : List[str] = False

    def lowerCamelCase (self ) -> int:
        '''Create a tiny OPTConfig plus matching random input tensors (ids end with EOS).'''
        snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        snake_case_ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        snake_case_ : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        snake_case_ : List[str] = self.config_cls(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_id=self.eos_token_id ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            embed_dim=self.embed_dim ,
            word_embed_proj_dim=self.word_embed_proj_dim ,
            is_encoder_decoder=__magic_name__ ,
            **self.config_updates ,
        )
        snake_case_ : Optional[Any] = prepare_opt_inputs_dict(__magic_name__ , __magic_name__ )
        return config, inputs_dict

    def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> str:
        '''Outputs computed with cached past_key_values must match a full forward pass.'''
        snake_case_ : Union[str, Any] = TFOPTModel(config=__magic_name__ )
        snake_case_ : Optional[Any] = inputs_dict['''input_ids''']

        snake_case_ : Optional[Any] = input_ids[:1, :]
        snake_case_ : Tuple = inputs_dict['''attention_mask'''][:1, :]
        snake_case_ : Optional[int] = 1

        # first forward pass
        snake_case_ : Optional[int] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )

        snake_case_ , snake_case_ : List[str] = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        snake_case_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case_ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        snake_case_ : Dict = tf.concat([input_ids, next_tokens] , axis=-1 )
        snake_case_ : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        snake_case_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )[0]
        snake_case_ : List[str] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        snake_case_ : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        snake_case_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
        snake_case_ : Dict = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1e-3 )


@require_tf
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
    '''Common-model-test harness for TF OPT (base model + causal-LM head).'''

    lowerCamelCase_ : Any = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    lowerCamelCase_ : List[str] = (TFOPTForCausalLM,) if is_tf_available() else ()
    lowerCamelCase_ : List[str] = (
        {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
    )
    lowerCamelCase_ : Dict = False
    lowerCamelCase_ : Optional[Any] = False
    lowerCamelCase_ : Dict = False
    lowerCamelCase_ : Union[str, Any] = 10

    def lowerCamelCase (self ) -> Optional[int]:
        '''Instantiate the model tester and the shared config tester.'''
        snake_case_ : Union[str, Any] = TFOPTModelTester(self )
        snake_case_ : Optional[int] = ConfigTester(self , config_class=__magic_name__ )

    def lowerCamelCase (self ) -> Dict:
        self.config_tester.run_common_tests()

    def lowerCamelCase (self ) -> Optional[Any]:
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__magic_name__ )

    def lowerCamelCase (self ) -> Tuple:
        '''Resizing token embeddings must keep existing rows unchanged and match the new size.'''
        snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(__magic_name__ , __magic_name__ ):
            if hasattr(__magic_name__ , '''weight''' ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(__magic_name__ , '''weight''' ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                snake_case_ : Union[str, Any] = model_class(config=__magic_name__ )
                snake_case_ : Dict = _get_word_embedding_weight(__magic_name__ , model.get_input_embeddings() )
                snake_case_ : Any = _get_word_embedding_weight(__magic_name__ , model.get_output_embeddings() )

                # reshape the embeddings
                model.resize_token_embeddings(__magic_name__ )
                snake_case_ : List[str] = _get_word_embedding_weight(__magic_name__ , model.get_input_embeddings() )
                snake_case_ : Any = _get_word_embedding_weight(__magic_name__ , model.get_output_embeddings() )

                # check that the resized embeddings size matches the desired size.
                snake_case_ : Tuple = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , __magic_name__ )

                # check that weights remain the same after resizing
                snake_case_ : Optional[Any] = True
                for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                        snake_case_ : Optional[Any] = False
                self.assertTrue(__magic_name__ )

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , __magic_name__ )

                    snake_case_ : str = True
                    for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                            snake_case_ : Optional[Any] = False
                    self.assertTrue(__magic_name__ )


def lowerCamelCase_ ( _UpperCamelCase ) -> str:
    '''Wrap a Python list of token ids into an int32 TF constant.'''
    return tf.constant(_UpperCamelCase , dtype=tf.intaa )


@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
    lowerCamelCase_ : Any = 99  # tiny vocab size shared by the head tests

    def lowerCamelCase (self ) -> Tuple:
        '''Build a tiny OPT config and a batch of random ids whose last column is EOS.'''
        snake_case_ : Optional[int] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
        snake_case_ : Union[str, Any] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        snake_case_ : Dict = input_ids.shape[0]
        snake_case_ : Optional[Any] = OPTConfig(
            vocab_size=self.vocab_size ,
            hidden_size=24 ,
            num_hidden_layers=2 ,
            num_attention_heads=2 ,
            ffn_dim=32 ,
            max_position_embeddings=48 ,
            eos_token_id=2 ,
            pad_token_id=1 ,
            bos_token_id=0 ,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        '''Last hidden state of facebook/opt-350m must match recorded reference values (eager and XLA).'''
        snake_case_ : List[Any] = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
        snake_case_ : List[str] = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        snake_case_ : List[Any] = tf.not_equal(__magic_name__ , model.config.pad_token_id )
        with tf.GradientTape():
            snake_case_ : Union[str, Any] = model(input_ids=__magic_name__ , attention_mask=__magic_name__ ).last_hidden_state
        snake_case_ : Optional[Any] = (1, 11, 512)
        self.assertEqual(output.shape , __magic_name__ )
        snake_case_ : List[Any] = tf.constant(
            [[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , __magic_name__ , atol=4e-3 ) )

        # Same check through an XLA-compiled call, with a looser tolerance.
        snake_case_ : Dict = tf.function(__magic_name__ , jit_compile=__magic_name__ )
        snake_case_ : Any = xla_generate(__magic_name__ , __magic_name__ )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , __magic_name__ , atol=4e-2 ) )


@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
    def lowerCamelCase (self ) -> int:
        super().setUp()
        snake_case_ : Any = '''facebook/opt-350m'''

    def lowerCamelCase (self ) -> str:
        '''Mean logits for fixed prompts must match recorded reference values (eager and XLA).'''
        snake_case_ : int = TFOPTForCausalLM.from_pretrained(self.path_model )
        snake_case_ : str = GPTaTokenizer.from_pretrained(self.path_model )

        snake_case_ : int = [
            '''Today is a beautiful day and I want to''',
            '''In the city of''',
            '''Paris is the capital of France and''',
            '''Computers and mobile phones have taken''',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        snake_case_ : List[Any] = tokenizer(__magic_name__ , return_tensors='''tf''' , padding=__magic_name__ , add_special_tokens=__magic_name__ )
        snake_case_ : Dict = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        snake_case_ : Dict = tf.constant(
            [
                [1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
                [-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
                [0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
                [6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
            ] )
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-4 ) )

        snake_case_ : Optional[int] = tf.function(__magic_name__ , jit_compile=__magic_name__ )
        snake_case_ : str = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-4 ) )


@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
    @property
    def lowerCamelCase (self ) -> Optional[int]:
        '''Fixed prompts shared by the generation tests below.'''
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def lowerCamelCase (self ) -> List[Any]:
        '''Greedy generations of facebook/opt-125m must match the recorded continuations.'''
        snake_case_ : str = '''facebook/opt-125m'''
        snake_case_ : Tuple = [
            '''Today is a beautiful day and I want to''',
            '''In the city of New York, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        snake_case_ : str = []
        snake_case_ : Tuple = GPTaTokenizer.from_pretrained(__magic_name__ )
        snake_case_ : List[Any] = TFOPTForCausalLM.from_pretrained(__magic_name__ )

        for prompt in self.prompts:
            snake_case_ : Tuple = tokenizer(__magic_name__ , return_tensors='''tf''' ).input_ids
            snake_case_ : str = model.generate(__magic_name__ , max_length=10 )
            snake_case_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
            predicted_outputs += generated_string

        self.assertListEqual(__magic_name__ , __magic_name__ )

    def lowerCamelCase (self ) -> int:
        '''Batched (left-padded) generation must match per-sentence generation.'''
        snake_case_ : List[str] = '''facebook/opt-350m'''
        snake_case_ : str = GPTaTokenizer.from_pretrained(__magic_name__ )
        snake_case_ : str = TFOPTForCausalLM.from_pretrained(__magic_name__ )
        snake_case_ : Union[str, Any] = '''left'''

        # use different length sentences to test batching
        snake_case_ : List[str] = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]

        snake_case_ : int = tokenizer(__magic_name__ , return_tensors='''tf''' , padding=__magic_name__ )
        snake_case_ : List[Any] = inputs['''input_ids''']

        snake_case_ : Tuple = model.generate(input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''] )

        snake_case_ : List[Any] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        snake_case_ : int = model.generate(input_ids=__magic_name__ )

        # Shorten the second generation by the number of pad positions so lengths line up.
        snake_case_ : Optional[int] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )

        snake_case_ : Any = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        snake_case_ : Dict = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )

        snake_case_ : Union[str, Any] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
        snake_case_ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
        snake_case_ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )

        snake_case_ : Tuple = [
            '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
            '''Today, I was in the middle of a conversation with a friend about the''',
        ]
        self.assertListEqual(__magic_name__ , __magic_name__ )
        self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )

    def lowerCamelCase (self ) -> Dict:
        '''Greedy generations of facebook/opt-350m must match the recorded continuations.'''
        snake_case_ : Optional[Any] = '''facebook/opt-350m'''
        snake_case_ : Optional[Any] = [
            '''Today is a beautiful day and I want to''',
            '''In the city of San Francisco, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        snake_case_ : List[str] = []
        snake_case_ : Union[str, Any] = GPTaTokenizer.from_pretrained(__magic_name__ )
        snake_case_ : int = TFOPTForCausalLM.from_pretrained(__magic_name__ )

        for prompt in self.prompts:
            snake_case_ : Dict = tokenizer(__magic_name__ , return_tensors='''tf''' ).input_ids
            snake_case_ : Optional[int] = model.generate(__magic_name__ , max_length=10 )
            snake_case_ : Tuple = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
            predicted_outputs += generated_string

        self.assertListEqual(__magic_name__ , __magic_name__ )
279
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''): lowerCAmelCase_ = { '''linear''': PIL.Image.Resampling.BILINEAR, '''bilinear''': PIL.Image.Resampling.BILINEAR, '''bicubic''': PIL.Image.Resampling.BICUBIC, '''lanczos''': PIL.Image.Resampling.LANCZOS, '''nearest''': PIL.Image.Resampling.NEAREST, } else: lowerCAmelCase_ = { '''linear''': PIL.Image.LINEAR, '''bilinear''': PIL.Image.BILINEAR, '''bicubic''': PIL.Image.BICUBIC, '''lanczos''': PIL.Image.LANCZOS, '''nearest''': PIL.Image.NEAREST, } def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : Dict = (images / 2 + 0.5).clamp(0 , 1 ) snake_case_ : Dict = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() snake_case_ : int = numpy_to_pil(_UpperCamelCase ) return images def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" if images.ndim == 3: snake_case_ : Optional[Any] = images[None, ...] snake_case_ : Any = (images * 255).round().astype('''uint8''' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images snake_case_ : str = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images] else: snake_case_ : List[Any] = [Image.fromarray(_UpperCamelCase ) for image in images] return pil_images
279
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

lowerCAmelCase_ = {
    '''vocab_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

lowerCAmelCase_ = {
    '''yjernite/retribert-base-uncased''': 5_1_2,
}

lowerCAmelCase_ = {
    '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}


class __lowerCAmelCase ( _a ):
    '''Fast (tokenizers-backed) RetriBERT tokenizer with BERT-style special tokens.'''

    lowerCamelCase_ : List[Any] = VOCAB_FILES_NAMES
    lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
    lowerCamelCase_ : Optional[Any] = RetriBertTokenizer
    lowerCamelCase_ : Dict = ['''input_ids''', '''attention_mask''']

    def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=True , __magic_name__="[UNK]" , __magic_name__="[SEP]" , __magic_name__="[PAD]" , __magic_name__="[CLS]" , __magic_name__="[MASK]" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ) -> Optional[Any]:
        '''Initialize the fast tokenizer, rebuilding the backend normalizer when settings differ.'''
        super().__init__(
            __magic_name__ ,
            tokenizer_file=__magic_name__ ,
            do_lower_case=__magic_name__ ,
            unk_token=__magic_name__ ,
            sep_token=__magic_name__ ,
            pad_token=__magic_name__ ,
            cls_token=__magic_name__ ,
            mask_token=__magic_name__ ,
            tokenize_chinese_chars=__magic_name__ ,
            strip_accents=__magic_name__ ,
            **__magic_name__ ,
        )

        snake_case_ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        # Rebuild the backend normalizer only when the serialized settings disagree
        # with the requested ones.
        if (
            normalizer_state.get('''lowercase''' , __magic_name__ ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , __magic_name__ ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , __magic_name__ ) != tokenize_chinese_chars
        ):
            snake_case_ : int = getattr(__magic_name__ , normalizer_state.pop('''type''' ) )
            snake_case_ : Union[str, Any] = do_lower_case
            snake_case_ : Union[str, Any] = strip_accents
            snake_case_ : int = tokenize_chinese_chars
            snake_case_ : Tuple = normalizer_class(**__magic_name__ )

        snake_case_ : Dict = do_lower_case

    def lowerCamelCase (self , __magic_name__ , __magic_name__=None ) -> Any:
        '''BERT-style specials: ``[CLS] A [SEP]`` (``B [SEP]`` appended for pairs).'''
        snake_case_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
        '''Token-type ids: 0 for the first segment (incl. specials), 1 for the second.'''
        snake_case_ : Optional[int] = [self.sep_token_id]
        snake_case_ : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
        '''Save the backend vocabulary files and return their paths.'''
        snake_case_ : Any = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
        return tuple(__magic_name__ )
279
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BioGPT's BPE tokenizer.

    BUG FIX (restored from obfuscated source): the base class was the
    undefined name ``_a`` and the mixin configuration attributes
    (``tokenizer_class``, ``test_rust_tokenizer``) had been renamed away,
    so the mixin could not find them.
    """

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False  # BioGPT has no fast/rust tokenizer

    def setUp(self):
        super().setUp()

        # Minimal BPE vocab/merges, adapted from Sennrich et al. 2015 and
        # https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Round-trip sample required by TokenizerTesterMixin."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the tiny fixture vocab."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token assembly with the real pretrained checkpoint (network access)."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # BioGPT prepends </s> (id 2) to each sequence.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
279
1
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowerCamelCase_ ( ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = ArgumentParser( description=( '''PyTorch TPU distributed training launch ''' '''helper utility that will spawn up ''' '''multiple distributed processes''' ) ) # Optional arguments for the launch helper parser.add_argument('''--num_cores''' , type=_UpperCamelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''' ) # positional parser.add_argument( '''training_script''' , type=_UpperCamelCase , help=( '''The full path to the single TPU training ''' '''program/script to be launched in parallel, ''' '''followed by all the arguments for the ''' '''training script''' ) , ) # rest from the training program parser.add_argument('''training_script_args''' , nargs=_UpperCamelCase ) return parser.parse_args() def lowerCamelCase_ ( ) -> Optional[int]: """simple docstring""" snake_case_ : Tuple = parse_args() # Import training_script as a module. snake_case_ : Dict = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) snake_case_ : Tuple = script_fpath.stem snake_case_ : List[str] = importlib.import_module(_UpperCamelCase ) # Patch sys.argv snake_case_ : Dict = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
279
from __future__ import annotations def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> tuple[float, list[float]]: """simple docstring""" snake_case_ : Dict = list(range(len(_UpperCamelCase ) ) ) snake_case_ : Dict = [v / w for v, w in zip(_UpperCamelCase , _UpperCamelCase )] index.sort(key=lambda _UpperCamelCase : ratio[i] , reverse=_UpperCamelCase ) snake_case_ : float = 0 snake_case_ : list[float] = [0] * len(_UpperCamelCase ) for i in index: if weight[i] <= capacity: snake_case_ : Dict = 1 max_value += value[i] capacity -= weight[i] else: snake_case_ : Union[str, Any] = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
279
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''], '''processing_git''': ['''GitProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GitForCausalLM''', '''GitModel''', '''GitPreTrainedModel''', '''GitVisionModel''', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
279
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    """Builds a GLPN image-processor config for the test class below.

    BUG FIX (restored from obfuscated source): every ``__init__`` parameter
    shared one placeholder name (a SyntaxError) and the class name no longer
    matched the ``GLPNImageProcessingTester(self)`` call site in ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Kwargs for constructing a GLPNImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Image-processing tests for GLPN (which resizes to multiples of size_divisor)."""

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes the expected configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        # GLPNImageProcessor doesn't support batching; nothing to test here.
        pass

    def test_call_pil(self):
        """PIL input: output height/width are multiples of size_divisor."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        """NumPy input: output height/width are multiples of size_divisor."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        """Torch input: output height/width are multiples of size_divisor."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
279
1
from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCAmelCase_ = {'''tokenization_bertweet''': ['''BertweetTokenizer''']} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
279
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
279
1
# Tests for ESMFold (protein folding head of ESM).
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


# Config/inputs factory used by the model test class below.
# NOTE(review): the test class's setUp calls `EsmFoldModelTester(self)`, but
# this class is named `__lowerCAmelCase` -- a renaming artifact of an
# automated transform. Several methods below also declare multiple parameters
# with the same placeholder name (a SyntaxError) and assign to `snake_case_`
# instead of the local names that are later read; the code is preserved
# as-is and flagged for repair.
class __lowerCAmelCase :
    def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=False , __magic_name__=True , __magic_name__=False , __magic_name__=False , __magic_name__=19 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ) -> int:
        """Store the tester's hyperparameters (tiny model sizes for fast tests)."""
        # NOTE(review): the right-hand names (parent, batch_size, ...) are not
        # bound by the mangled parameter list above -- artifact, see class note.
        snake_case_ : str = parent
        snake_case_ : List[str] = batch_size
        snake_case_ : Optional[Any] = seq_length
        snake_case_ : List[Any] = is_training
        snake_case_ : Tuple = use_input_mask
        snake_case_ : List[Any] = use_token_type_ids
        snake_case_ : int = use_labels
        snake_case_ : Optional[Any] = vocab_size
        snake_case_ : Any = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : Union[str, Any] = num_attention_heads
        snake_case_ : Tuple = intermediate_size
        snake_case_ : int = hidden_act
        snake_case_ : Optional[Any] = hidden_dropout_prob
        snake_case_ : List[str] = attention_probs_dropout_prob
        snake_case_ : str = max_position_embeddings
        snake_case_ : Optional[int] = type_vocab_size
        snake_case_ : Optional[int] = type_sequence_label_size
        snake_case_ : Optional[int] = initializer_range
        snake_case_ : Union[str, Any] = num_labels
        snake_case_ : List[str] = num_choices
        snake_case_ : Dict = scope

    # prepare_config_and_inputs: random ids/masks/labels plus a config.
    def lowerCamelCase (self ) -> Tuple:
        """Build random input tensors and the matching tiny config."""
        snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        snake_case_ : Optional[int] = None
        if self.use_input_mask:
            snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )

        snake_case_ : List[str] = None
        snake_case_ : Dict = None
        snake_case_ : int = None
        if self.use_labels:
            snake_case_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )

        snake_case_ : List[Any] = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    # get_config: tiny EsmConfig in folding mode with a 2-block trunk.
    def lowerCamelCase (self ) -> str:
        """Return a minimal folding-model EsmConfig."""
        snake_case_ : Union[str, Any] = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__magic_name__ , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
        return config

    # create_and_check_model: run the folding model and check output shapes.
    # NOTE(review): duplicate `__magic_name__` parameters below are a
    # SyntaxError -- artifact, see class note.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
        """Forward the model and assert positions/angles output shapes."""
        snake_case_ : List[str] = EsmForProteinFolding(config=__magic_name__ ).float()
        model.to(__magic_name__ )
        model.eval()
        snake_case_ : int = model(__magic_name__ , attention_mask=__magic_name__ )
        snake_case_ : Optional[Any] = model(__magic_name__ )
        snake_case_ : str = model(__magic_name__ )

        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )

    # prepare_config_and_inputs_for_common: repackage inputs as a kwargs dict.
    def lowerCamelCase (self ) -> Optional[Any]:
        """Return (config, inputs_dict) as expected by ModelTesterMixin."""
        snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) , (
                snake_case_
            ) ,
        ) : Tuple = config_and_inputs
        snake_case_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


# Common-model test suite for ESMFold; most generic tests are skipped since
# ESMFold's output format is unlike the standard transformer models.
# NOTE(review): the bases `_a, _a` are undefined placeholder names (were
# presumably ModelTesterMixin and PipelineTesterMixin), and the class-level
# attributes were renamed away from the mixin's expected names -- artifact.
@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
    lowerCamelCase_ : Dict = False
    lowerCamelCase_ : Optional[int] = (EsmForProteinFolding,) if is_torch_available() else ()
    lowerCamelCase_ : List[Any] = ()
    lowerCamelCase_ : Optional[Any] = {} if is_torch_available() else {}
    lowerCamelCase_ : Dict = False

    # setUp: build the model tester and config tester.
    # NOTE(review): `__magic_name__` here is an unbound name at runtime
    # (should be the config class) -- artifact.
    def lowerCamelCase (self ) -> Optional[int]:
        """Create the model/config testers used by the suite."""
        snake_case_ : Any = EsmFoldModelTester(self )
        snake_case_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )

    def lowerCamelCase (self ) -> Optional[int]:
        """Run the generic config round-trip tests."""
        self.config_tester.run_common_tests()

    # NOTE(review): `*__magic_name__` is unbound here (should unpack the
    # prepared config and inputs) -- artifact.
    def lowerCamelCase (self ) -> Union[str, Any]:
        """Shape-check a forward pass of the folding model."""
        snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__ )

    @unittest.skip('''Does not support attention outputs''' )
    def lowerCamelCase (self ) -> Optional[Any]:
        '''simple docstring'''
        pass

    @unittest.skip
    def lowerCamelCase (self ) -> List[Any]:
        '''simple docstring'''
        pass

    @unittest.skip('''Esm does not support embedding resizing''' )
    def lowerCamelCase (self ) -> int:
        '''simple docstring'''
        pass

    @unittest.skip('''Esm does not support embedding resizing''' )
    def lowerCamelCase (self ) -> List[str]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold does not support passing input embeds!''' )
    def lowerCamelCase (self ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowerCamelCase (self ) -> str:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowerCamelCase (self ) -> Optional[Any]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold does not support head pruning.''' )
    def lowerCamelCase (self ) -> Dict:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
    def lowerCamelCase (self ) -> int:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
    def lowerCamelCase (self ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold only has one output format.''' )
    def lowerCamelCase (self ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
    def lowerCamelCase (self ) -> Optional[Any]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold does not support input chunking.''' )
    def lowerCamelCase (self ) -> List[str]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
    def lowerCamelCase (self ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def lowerCamelCase (self ) -> str:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def lowerCamelCase (self ) -> int:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
    def lowerCamelCase (self ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip('''ESMFold doesn\'t support data parallel.''' )
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''simple docstring'''
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowerCamelCase (self ) -> Dict:
        '''simple docstring'''
        pass


# Slow integration test against the real esmfold_v1 checkpoint.
# NOTE(review): base `_a` is an undefined placeholder (presumably
# TestCasePlus), and `__magic_name__` in the body is unbound -- artifact.
@require_torch
class __lowerCAmelCase ( _a ):
    @slow
    def lowerCamelCase (self ) -> Tuple:
        """Check the first predicted atom position against reference values."""
        snake_case_ : Optional[Any] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
        model.eval()
        snake_case_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        snake_case_ : Optional[int] = model(__magic_name__ )['''positions''']
        # NOTE(review): `torch.floataa` is not a real dtype (presumably
        # torch.float64 before mangling) -- artifact.
        snake_case_ : Tuple = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __magic_name__ , atol=1e-4 ) )
279
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#

import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean

import pandas as pd
import torch
from tqdm import tqdm

import transformers


# Sentinel for a failed/missing metric.
# BUG FIX (restored from obfuscated source): every function in this module
# had been renamed to the same placeholder, so all the call sites below
# (`main()`, `get_base_command()`, `process_run_single()`, ...) referenced
# undefined names; parameter lists also reused one placeholder name, which
# is a SyntaxError. Real names and parameters are restored throughout.
nan = float("nan")


class Tee:
    """Duplicate everything written to stdout into a log file (tqdm codes stripped)."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # Delegate everything else (flush, isatty, ...) to the real stdout.
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes (carriage-return progress updates)
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Reconstruct this script's own command line, wrapped with shell escapes.

    Args:
        max_width: wrap the command at this width.
        full_python_path: replicate the full interpreter path, not just `python`.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    """Normalize --base-cmd into an argv list, forcing our own --output_dir."""
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark subprocess and return the metrics we care about."""
    # Flip to `if 1:` to debug report generation quickly with fake metrics.
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        # nan marks a failed run so the averaging step can skip it
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation `repeat_times` times and return averaged metrics."""
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r erases the tqdm leftovers on the line before printing the outcome
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        # every repeat failed: report nan for this variation
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    """Return a software/hardware setup summary for the report."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Build and print the comparison tables (github + console flavors)."""
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    """Parse args, run every variation of the base command, report results."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
279
1
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    """Return the ``(features, target)`` pair stored in a sklearn dataset bunch."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor on ``(features, target)`` and predict ``test_features``.

    Returns:
        Predictions as a column vector of shape ``(n_samples, 1)``.
    """
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    # BUG FIX: the reshaped array was previously assigned to a throwaway
    # variable while the un-reshaped array was returned; return the (n, 1)
    # column vector the docstring/annotation promises.
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """Fetch the California housing dataset, train, and report MAE/MSE."""
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
279
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # BUG FIX: argparse stores "--txt2img_unclip" under ``args.txt2img_unclip``;
    # the previous ``args.txtaimg_unclip`` raised AttributeError at runtime.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Re-assemble the txt2img components (plus a CLIP image encoder) into an
    # image-variation pipeline and save it to disk.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
279
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class __lowerCAmelCase(PipelineTool):
    """Agent tool that summarizes an English text with a BART-SAMSum checkpoint.

    BUG FIX: all three methods and every class attribute previously shared a
    single (machine-obfuscated) name, so later definitions silently shadowed
    earlier ones and the PipelineTool contract was never satisfied; the base
    class reference ``_a`` and the import ``AutoModelForSeqaSeqLM`` were also
    unresolved names.  Names are restored to the encode/forward/decode
    contract of the imported ``PipelineTool`` base class.
    NOTE(review): the upstream class is likely named ``TextSummarizationTool``;
    the (obfuscated) class name is kept here so external references still bind.
    """

    # Checkpoint loaded when the user does not supply a model.
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's maximum length."""
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        """Run generation; the tool works on a single sample, so return row 0."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Convert generated token ids back into clean, human-readable text."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
279
"""Factorial digit chains (Project Euler problem 74 style).

``solution(chain_length, number_limit)`` counts how many starting numbers
below ``number_limit`` produce a non-repeating chain of exactly
``chain_length`` terms, where each term is the sum of the factorials of
the previous term's digits.

BUG FIX: assignment targets in this file had been machine-obfuscated
(``snake_case_ = ...``) while the read sites kept the real names, causing
NameError at runtime; the names are restored from those read sites.
"""
from math import factorial

# Factorial of every decimal digit, keyed by the digit *character* so that
# digits of a number can be looked up straight from its string form.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``.

    >>> digit_factorial_sum(145)
    145

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Count starting values below ``number_limit`` whose factorial-digit
    chain contains exactly ``chain_length`` non-repeating terms.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating
        # item or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Reuse the cached tail length when the chain hit a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
279
1
import json
import os
import re
import sys
import urllib.request

import requests
from bsa import BeautifulSoup

# NOTE(review): identifiers in this file were machine-obfuscated: assignment
# targets became ``snake_case_``/``lowerCAmelCase_`` and parameters/arguments
# became ``_UpperCamelCase`` while read sites kept the original names
# (``query``, ``max_images``, ``html``, ``soup``, ...).  As written, the
# function below declares two parameters with the same name, which is a
# SyntaxError — the names must be restored before this file can run.  The
# ``__main__`` guard shows the function's original name:
# ``download_images_from_google_query``.
# NOTE(review): ``from bsa import BeautifulSoup`` is presumably ``bs4`` —
# confirm before restoring.

# Default request headers: impersonate a desktop browser so Google serves the
# regular (scrapable) search page.
lowerCAmelCase_ = {
    '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
    ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}


def lowerCamelCase_ ( _UpperCamelCase = "dhaka" , _UpperCamelCase = 5 ) -> int:
    """Download up to ``max_images`` Google Images results for ``query``.

    Images are saved under ``query_<query>/original_size_img_<i>.jpg`` in the
    current working directory; the return value is the number of images
    written.  Requires network access.
    """
    # Cap the number of requested images (read back below as ``max_images``).
    snake_case_ : Dict = min(_UpperCamelCase , 50 )  # Prevent abuse!
    # Query parameters for a Google *image* search (tbm=isch).
    snake_case_ : str = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }
    snake_case_ : Tuple = requests.get('''https://www.google.com/search''' , params=_UpperCamelCase , headers=_UpperCamelCase )
    snake_case_ : Optional[int] = BeautifulSoup(html.text , '''html.parser''' )
    # Pull the inline JSON payload Google embeds in a <script> callback.
    snake_case_ : Tuple = ''''''.join(
        re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
    # Round-trip through json to normalise the extracted blob into a string.
    snake_case_ : Optional[Any] = json.dumps(_UpperCamelCase )
    snake_case_ : Union[str, Any] = json.loads(_UpperCamelCase )
    # Locate the grid-state section that carries the image metadata.
    snake_case_ : Dict = re.findall(
        R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , _UpperCamelCase , )
    if not matched_google_image_data:
        # No image metadata found on the page — nothing to download.
        return 0
    # Drop the low-resolution thumbnail entries so only full-size URLs remain.
    snake_case_ : int = re.sub(
        R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(_UpperCamelCase ) , )
    # Collect the remaining full-resolution image URLs.
    snake_case_ : List[Any] = re.findall(
        R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , _UpperCamelCase , )
    for index, fixed_full_res_image in enumerate(_UpperCamelCase ):
        if index >= max_images:
            return index
        # The URLs are embedded with escaped unicode; decode them twice
        # through ascii/unicode-escape to obtain the real URL.
        snake_case_ : Union[str, Any] = bytes(_UpperCamelCase , '''ascii''' ).decode(
            '''unicode-escape''' )
        snake_case_ : Any = bytes(_UpperCamelCase , '''ascii''' ).decode(
            '''unicode-escape''' )
        # Install an opener carrying the browser User-Agent for urlretrieve.
        snake_case_ : Tuple = urllib.request.build_opener()
        snake_case_ : Union[str, Any] = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(_UpperCamelCase )
        # Per-query output directory, e.g. "query_dhaka".
        snake_case_ : Optional[Any] = f'''query_{query.replace(" " , "_" )}'''
        if not os.path.exists(_UpperCamelCase ):
            os.makedirs(_UpperCamelCase )
        urllib.request.urlretrieve(  # noqa: S310
            _UpperCamelCase , f'''{path_name}/original_size_img_{index}.jpg''' )
    return index


if __name__ == "__main__":
    try:
        lowerCAmelCase_ = download_images_from_google_query(sys.argv[1])
        print(F'''{image_count} images were downloaded to disk.''')
    except IndexError:
        print('''Please provide a search term.''')
        raise
279
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE

# NOTE(review): identifiers in this file were machine-obfuscated: every class
# is named ``__lowerCAmelCase`` (later definitions shadow earlier ones at
# module level), class attributes are all ``lowerCamelCase_`` (same problem),
# parameters are ``__magic_name__`` (duplicated — a SyntaxError), and
# assignment targets are ``snake_case_`` while read sites keep the original
# names (``self.file``, ``target_options``, ``self.dir_cache``, ...).  The
# structure matches an fsspec compressed-file filesystem (one base class plus
# bz2/gzip/lz4/xz/zstd subclasses); names must be restored before it can run.


class __lowerCAmelCase ( _a ):
    """Read-only fsspec filesystem exposing a single compressed file.

    NOTE(review): base class ``_a`` is presumably ``AbstractArchiveFileSystem``
    (imported above) — confirm against the original source.
    """

    # root marker: the filesystem exposes a single file at its root
    lowerCamelCase_ : int = ''''''
    lowerCamelCase_ : str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    lowerCamelCase_ : str = None  # compression type in fsspec. ex: "gzip"
    lowerCamelCase_ : str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self , __magic_name__ = "" , __magic_name__ = None , __magic_name__ = None , **__magic_name__ ) -> Any:
        """Lazily open the compressed file through ``fsspec.open``."""
        super().__init__(self , **__magic_name__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case_ : Union[str, Any] = fsspec.open(
            __magic_name__ ,
            mode='''rb''' ,
            protocol=__magic_name__ ,
            compression=self.compression ,
            client_kwargs={
                '''requote_redirect_url''': False,  # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''' , {} ),  # To avoid issues if it was already passed.
            } ,
            **(target_options or {}) ,
        )
        # Basename of the compressed file (the part before any "::" chaining).
        snake_case_ : Tuple = os.path.basename(self.file.path.split('''::''' )[0] )
        # Same name with the trailing compression extension stripped.
        snake_case_ : Optional[Any] = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        # Lazy directory cache, populated on first listing.
        snake_case_ : Dict = None

    @classmethod
    def lowerCamelCase (cls , __magic_name__ ) -> Optional[int]:
        """Strip the protocol prefix; entries are always relative to the root."""
        return super()._strip_protocol(__magic_name__ ).lstrip('''/''' )

    def lowerCamelCase (self ) -> Union[str, Any]:
        """Populate the directory cache with the single uncompressed entry."""
        if self.dir_cache is None:
            snake_case_ : Optional[int] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            snake_case_ : List[str] = {f['''name''']: f}

    def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
        """Return the whole decompressed content as bytes."""
        return self.file.open().read()

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = "rb" , __magic_name__=None , __magic_name__=True , __magic_name__=None , **__magic_name__ , ) -> int:
        """Open the (only) file; binary read is the sole supported mode."""
        snake_case_ : Union[str, Any] = self._strip_protocol(__magic_name__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()


class __lowerCAmelCase ( _a ):
    # bz2-compressed single-file filesystem (protocol / compression / extension)
    lowerCamelCase_ : Union[str, Any] = '''bz2'''
    lowerCamelCase_ : Any = '''bz2'''
    lowerCamelCase_ : int = '''.bz2'''


class __lowerCAmelCase ( _a ):
    # gzip-compressed single-file filesystem
    lowerCamelCase_ : Union[str, Any] = '''gzip'''
    lowerCamelCase_ : Dict = '''gzip'''
    lowerCamelCase_ : int = '''.gz'''


class __lowerCAmelCase ( _a ):
    # lz4-compressed single-file filesystem
    lowerCamelCase_ : Any = '''lz4'''
    lowerCamelCase_ : Any = '''lz4'''
    lowerCamelCase_ : Optional[Any] = '''.lz4'''


class __lowerCAmelCase ( _a ):
    # xz-compressed single-file filesystem
    lowerCamelCase_ : Tuple = '''xz'''
    lowerCamelCase_ : Any = '''xz'''
    lowerCamelCase_ : int = '''.xz'''


class __lowerCAmelCase ( _a ):
    # zstandard-compressed single-file filesystem; needs a workaround below
    lowerCamelCase_ : Union[str, Any] = '''zstd'''
    lowerCamelCase_ : Tuple = '''zstd'''
    lowerCamelCase_ : Any = '''.zst'''

    def __init__(self , __magic_name__ , __magic_name__ = "rb" , __magic_name__ = None , __magic_name__ = None , __magic_name__ = DEFAULT_BLOCK_SIZE , **__magic_name__ , ) -> Tuple:
        """Open the zstd file, then patch ``__enter__`` (see comment below)."""
        super().__init__(
            fo=__magic_name__ ,
            mode=__magic_name__ ,
            target_protocol=__magic_name__ ,
            target_options=__magic_name__ ,
            block_size=__magic_name__ ,
            **__magic_name__ ,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case_ : Dict = self.file.__enter__

        class __lowerCAmelCase :
            """Proxy whose attributes (incl. ``close``) are writable."""

            def __init__(self , __magic_name__ ) -> List[Any]:
                # Wrapped decompression reader object.
                snake_case_ : str = file_

            def __enter__(self ) -> List[Any]:
                self._file.__enter__()
                return self

            def __exit__(self , *__magic_name__ , **__magic_name__ ) -> int:
                self._file.__exit__(*__magic_name__ , **__magic_name__ )

            def __iter__(self ) -> Optional[int]:
                return iter(self._file )

            def lowerCamelCase (self ) -> Union[str, Any]:
                # Delegate ``next()`` to the wrapped file.
                return next(self._file )

            def __getattr__(self , __magic_name__ ) -> str:
                # Everything else falls through to the wrapped file.
                return getattr(self._file , __magic_name__ )

        def fixed_enter(*__magic_name__ , **__magic_name__ ):
            # Replacement ``__enter__`` returning the writable proxy.
            return WrappedFile(_enter(*__magic_name__ , **__magic_name__ ) )

        snake_case_ : Tuple = fixed_enter
279
1
"""Lorentz transformation (boost along the x-axis) in special relativity.

BUG FIX: assignment targets and parameter names in this file had been
machine-obfuscated while read sites kept the real names (``c``, ``event``,
``four_vector``, ``sub_dict``, ``numerical_vector``), causing NameError /
SyntaxError; the names are restored from those read sites.
"""
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v / c for 1 <= velocity <= c.

    Raises:
        ValueError: if ``velocity`` exceeds c or is below 1 m/s.
    """
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix for a boost along x."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event=None) -> np.ndarray:
    """Boost the four-vector ``event`` ([t, x, y, z]); symbolic if ``event`` is None.

    Note: when an event is given, its time component is multiplied by ``c``
    *in place* before the transformation (x0 = c*t).
    """
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
279
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for a MEGATRON_BERT model.

    BUG FIX: every ``__init__`` parameter previously shared one obfuscated
    name (a SyntaxError) and the self-assignment targets were obfuscated;
    parameter names are restored from the right-hand sides of the original
    assignments, and the base class from the visible ``PretrainedConfig``
    import.  Defaults are unchanged.
    NOTE(review): the upstream class is likely ``MegatronBertConfig``; the
    (obfuscated) class name is kept so external references still bind.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
279
1
"""Gauss's Easter algorithm: compute the Gregorian date of Easter for a year.

BUG FIX: assignment targets in this file had been machine-obfuscated
(``snake_case_ = ...``) while the formulas read the real names, causing
NameError at runtime; the names are restored from those read sites.
"""
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Return the date of Easter Sunday for ``year`` (Gregorian calendar)."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    # Two exceptional cases of Gauss's algorithm where the computed date
    # would overshoot; Easter is pulled back to April 19 / April 18.
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
279
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

# NOTE(review): identifiers in this file were machine-obfuscated: assignment
# targets became ``snake_case_``/``lowerCAmelCase_``, parameters became
# ``_UpperCamelCase``/``__magic_name__`` (duplicated — a SyntaxError), and the
# two classes and all test methods share one name each (later defs shadow
# earlier ones).  Read sites keep the real names (``rng``, ``speech_inputs``,
# ``feature_extractor``, ``WhisperFeatureExtractionTester``, ...), and
# ``np.floataa`` is presumably ``np.float32`` — names must be restored before
# these tests can run.

# Module-level RNG shared by ``floats_list`` when no rng is passed.
lowerCAmelCase_ = random.Random()


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
    """Create a random 2-D list of floats with the given ``shape``, scaled by ``scale``."""
    if rng is None:
        snake_case_ : str = global_rng
    snake_case_ : Any = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values


@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
    """Holds the feature-extractor hyper-parameters under test and builds dummy inputs.

    NOTE(review): the test class below instantiates this as
    ``WhisperFeatureExtractionTester`` — its original name.
    """

    def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=400 , __magic_name__=2000 , __magic_name__=10 , __magic_name__=160 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=4000 , __magic_name__=False , __magic_name__=True , ) -> List[str]:
        """Store the test configuration (batch size, sequence lengths, STFT params)."""
        snake_case_ : Tuple = parent
        snake_case_ : str = batch_size
        snake_case_ : Union[str, Any] = min_seq_length
        snake_case_ : Tuple = max_seq_length
        # Step between consecutive input lengths so the batch spans min..max.
        snake_case_ : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        snake_case_ : Optional[int] = padding_value
        snake_case_ : Union[str, Any] = sampling_rate
        snake_case_ : Optional[int] = return_attention_mask
        snake_case_ : str = do_normalize
        snake_case_ : str = feature_size
        snake_case_ : Optional[Any] = chunk_length
        snake_case_ : Union[str, Any] = hop_length

    def lowerCamelCase (self ) -> Optional[int]:
        """Return the kwargs used to build the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def lowerCamelCase (self , __magic_name__=False , __magic_name__=False ) -> Optional[Any]:
        """Build a batch of dummy speech inputs (optionally equal-length / numpy)."""
        def _flatten(__magic_name__ ):
            return list(itertools.chain(*__magic_name__ ) )

        if equal_length:
            snake_case_ : int = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            snake_case_ : int = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            snake_case_ : str = [np.asarray(__magic_name__ ) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class __lowerCAmelCase ( _a, unittest.TestCase ):
    """Feature-extraction tests for WhisperFeatureExtractor.

    NOTE(review): mixin base ``_a`` is presumably
    ``SequenceFeatureExtractionTestMixin`` (imported above) — confirm.
    """

    # Class under test; None when the speech extras are not installed.
    lowerCamelCase_ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None

    def lowerCamelCase (self ) -> Optional[int]:
        """Create the shared tester fixture."""
        snake_case_ : List[str] = WhisperFeatureExtractionTester(self )

    def lowerCamelCase (self ) -> List[str]:
        """Round-trip the extractor through save_pretrained / from_pretrained."""
        snake_case_ : str = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Union[str, Any] = feat_extract_first.save_pretrained(__magic_name__ )[0]
            check_json_file_has_correct_format(__magic_name__ )
            snake_case_ : List[Any] = self.feature_extraction_class.from_pretrained(__magic_name__ )

        snake_case_ : Optional[int] = feat_extract_first.to_dict()
        snake_case_ : Dict = feat_extract_second.to_dict()
        # mel filter banks are float arrays: compare with allclose, rest with ==
        snake_case_ : List[str] = feat_extract_first.mel_filters
        snake_case_ : Union[str, Any] = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) )
        self.assertEqual(__magic_name__ , __magic_name__ )

    def lowerCamelCase (self ) -> Optional[Any]:
        """Round-trip the extractor through to_json_file / from_json_file."""
        snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : List[Any] = os.path.join(__magic_name__ , '''feat_extract.json''' )
            feat_extract_first.to_json_file(__magic_name__ )
            snake_case_ : Optional[int] = self.feature_extraction_class.from_json_file(__magic_name__ )

        snake_case_ : int = feat_extract_first.to_dict()
        snake_case_ : Optional[int] = feat_extract_second.to_dict()
        snake_case_ : Union[str, Any] = feat_extract_first.mel_filters
        snake_case_ : str = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) )
        self.assertEqual(__magic_name__ , __magic_name__ )

    def lowerCamelCase (self ) -> int:
        """Exercise __call__: shapes, list vs numpy parity, batching, truncation."""
        snake_case_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        snake_case_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        snake_case_ : str = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]

        # Test feature size
        snake_case_ : str = feature_extractor(__magic_name__ , padding='''max_length''' , return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )

        # Test not batched input
        snake_case_ : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
        snake_case_ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )

        # Test batched
        snake_case_ : int = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        snake_case_ : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
            self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        snake_case_ : List[str] = np.asarray(__magic_name__ )
        snake_case_ : List[Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        snake_case_ : Dict = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
            self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )

        # Test truncation required
        snake_case_ : Any = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        snake_case_ : Union[str, Any] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]

        snake_case_ : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
        snake_case_ : Optional[Any] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs_truncated]

        snake_case_ : Any = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        snake_case_ : List[Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ):
            self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )

    def lowerCamelCase (self ) -> int:
        """Check ``pad`` keeps double precision for both np and pt tensors."""
        import torch

        snake_case_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case_ : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa )
        snake_case_ : Dict = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            snake_case_ : Optional[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            snake_case_ : Optional[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )

    def lowerCamelCase (self , __magic_name__ ) -> Dict:
        """Load ``num_samples`` decoded audio arrays from the dummy LibriSpeech set."""
        snake_case_ : Optional[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        snake_case_ : Optional[Any] = ds.sort('''id''' ).select(range(__magic_name__ ) )[:num_samples]['''audio''']

        return [x["array"] for x in speech_samples]

    def lowerCamelCase (self ) -> str:
        """Integration: first 30 log-mel values must match the golden tensor."""
        snake_case_ : str = torch.tensor(
            [
                0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
                0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
                0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
                -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
            ] )
        # fmt: on

        snake_case_ : List[Any] = self._load_datasamples(1 )
        snake_case_ : Union[str, Any] = WhisperFeatureExtractor()
        snake_case_ : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , __magic_name__ , atol=1e-4 ) )

    def lowerCamelCase (self ) -> Union[str, Any]:
        """Check zero_mean_unit_var_norm yields ~0 mean and ~1 variance."""
        snake_case_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case_ : Optional[int] = self._load_datasamples(1 )[0]
        snake_case_ : List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535  # Rescale to [0, 65535] to show issue
        snake_case_ : Optional[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__magic_name__ )[0]

        self.assertTrue(np.all(np.mean(__magic_name__ ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(__magic_name__ ) - 1 ) < 1e-3 ) )
279
1
def optimal_merge_pattern(files: list) -> float:
    """Return the minimal total cost of merging all ``files`` two at a time.

    Greedy optimal-merge-pattern: repeatedly merge the two smallest files;
    each merge costs the sum of the two sizes, and that sum re-enters the
    pool as a new file.

    BUG FIX: assignment targets had been machine-obfuscated
    (``snake_case_ = ...``) while later statements read the real names
    (``temp``, ``min_index``, ``optimal_merge_cost``), causing NameError;
    names are restored from those read sites.

    NOTE: the input list is consumed (mutated) by this function.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
import argparse
import logging
import os

import datasets
import tensorflow as tf
from transformers import AutoTokenizer

logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line arguments for the TFRecord-shard preparation script."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a ``datasets.map``-compatible closure tokenizing the "text" column."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into ``tf.train.Example`` byte strings."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        serialized = example.SerializeToString()
        records.append(serialized)
    return records


def main(args):
    """Tokenize the dataset, chunk it to a fixed length and write TFRecord shards."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if
        # the model supports it.
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
279
1
import numpy as np


class Cell:
    """A cell of the grid world: position, parent link and A* scores (g, h, f)."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        # g: cost from start, h: heuristic to goal, f: g + h.
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Two cells are the same node if they share a grid position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """A rectangular grid of zeros in which A* searches for a path."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-connected neighbours of ``cell``, parented to it."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """Run A* from ``start`` to ``goal`` on ``world`` and return the path.

    Returns the list of positions from start to goal (inclusive), found by
    expanding the open node with the smallest f-score. The heuristic is the
    squared Euclidean distance to the goal.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = int(np.argmin([n.f for n in _open]))
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Never revisit an already-expanded position.
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # Skip if an equal-or-better copy of this node is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    # Walk parent links back from the goal and reverse into start->goal order.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
279
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    """Tests for the ``video-classification`` pipeline."""

    # Candidate model classes the pipeline may dispatch to.
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a top-2 video classifier plus example inputs (local file + URL)."""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        """Every example must yield a top-2 list of {score, label} dicts."""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        """Smoke-test a tiny VideoMAE model on a single clip and on a batch."""
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
                [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # No TF port of the tiny model exists yet.
        pass
279
1
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of ``number`` with ``number_of_terms`` rows.

    >>> print(multiplication_table(3, 4))
    3 * 1 = 3
    3 * 2 = 6
    3 * 3 = 9
    3 * 4 = 12
    """
    # One line per term, joined without a trailing newline.
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
279
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of ``number`` with ``number_of_terms`` rows.

    >>> print(multiplication_table(3, 4))
    3 * 1 = 3
    3 * 2 = 6
    3 * 3 = 9
    3 * 4 = 12
    """
    # One line per term, joined without a trailing newline.
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
279
1
# Size of each supported unit, expressed in joules.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from one energy unit to another.

    Both unit names must be keys of ``ENERGY_CONVERSION``.

    >>> energy_conversion("joule", "kilojoule", 1000)
    1.0

    Raises:
        ValueError: if either unit name is unknown.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Normalize to joules via the source factor, then divide by the target factor.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration

# Official OpenAI Whisper checkpoint URLs. The path component before the file
# name is the checkpoint's expected SHA256 digest (verified after download).
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    """Drop top-level keys that have no equivalent in the HF model (in place)."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


# OpenAI state-dict key fragments -> HF WhisperModel key fragments.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename OpenAI-style keys to HF names (in place) and return ``s_dict``."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Build a bias-free output projection that shares the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    """Download ``url`` into ``root``, verify its SHA256, and return the bytes.

    A previously downloaded file with a matching checksum is reused.
    NOTE(review): ``root`` defaults to the working directory so the
    one-argument call below works; confirm the intended cache location.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    # The penultimate URL path component is the expected SHA256 digest.
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1_024
        ) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model."
        )
    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into an HF WhisperForConditionalGeneration.

    ``checkpoint_path`` is either a local ``.pt`` file or a model name from
    ``_MODELS``; the converted model is saved to ``pytorch_dump_folder_path``.
    """
    if ".pt" not in checkpoint_path:
        # A model name was given: download the raw bytes, then deserialize.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # NOTE(review): the source read ``n_text_state`` here, which is a hidden
        # size, not a head count; ``n_text_head`` is the decoder head count.
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
279
1
from __future__ import annotations


class Node:
    """A node of a singly linked list holding ``data`` and a ``next`` pointer."""

    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        # Render the whole list starting at this node, e.g. "1->2->3".
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Create a linked list from ``elements_list`` and return its head node.

    Raises:
        Exception: if ``elements_list`` is empty.
    """
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node) -> None:
    """Print the list's elements in reverse order via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    """Run the doctests, then demonstrate list construction and reverse print."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
279
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sola import PokerHand

# Hands ordered weakest -> strongest, used both as sort fixtures and as the
# pool for randomized compare tests.
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (player hand, opponent hand, expected compare_with result)
TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

# (hand, is five-high straight, expected card values after evaluation)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    """Pick two hands from SORTED_HANDS; the stronger index decides the result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) maps to 0 Loss / 1 Tie / 2 Win.
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield ``number_of_hands`` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    """Shuffled hands must sort back into SORTED_HANDS order."""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    """Repeated evaluation must not mutate the cached card values."""
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    """Project Euler problem 54: player 1 wins 376 of the supplied hands."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
279
1
"""Convert a Harmonai / audio-diffusion Dance Diffusion checkpoint to a
diffusers ``UNet1DModel`` + ``DanceDiffusionPipeline``, then sanity-check the
converted weights by comparing generated audio against the original sampler.
"""
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


# Official checkpoints: download URL plus the audio config they were trained with.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Map an (alpha, sigma) noise pair onto a timestep in [0, 1].

    BUG FIX: the original called the nonexistent ``torch.atana``; the
    crash-schedule math requires the two-argument arctangent ``torch.atan2``.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Return the 'crash' noise schedule for linearly spaced timesteps ``t``."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    # Bare attribute holder used as a stand-in for the training config object.
    pass


class DiffusionUncond(nn.Module):
    """Minimal re-declaration of the original training LightningModule so the
    checkpoint's ``state_dict`` keys (diffusion / diffusion_ema / rng) resolve."""

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    """Fetch an official checkpoint by name into the CWD and return its path."""
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


# Layer-index remapping tables: original UNet child index -> diffusers sub-module.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    """Rename one residual-conv parameter suffix using RES_CONV_MAP."""
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    """Rename one attention parameter suffix.

    A fused qkv projection maps to a *list* of three diffusers names; the
    caller splits the tensor accordingly (see ``transform_conv_attns``).
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    """Translate one original state-dict key into diffusers naming.

    Returns either a single key, or a list of keys when a fused attention
    projection must be split.
    """
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    # Each nested "net.3." / "main.7." wrapper corresponds to one UNet depth level.
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block: child indices can be two digits
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)
    new_string_left = string_left

    if not isinstance(new_string_left, list):
        new_string = prefix + "." + new_layer + "." + new_string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in new_string_left]
    return new_string


def rename_orig_weights(state_dict):
    """Build a diffusers-keyed state dict from the original checkpoint's."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    """Split a fused 1D-conv qkv tensor ``v`` across the diffusers keys ``new_k``,
    dropping the trailing conv axis so the result fits a Linear layer."""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    """Convert ``args.model_path`` and verify generated audio matches the
    original iPLMS sampler to within 1e-3 max absolute difference."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    # The EMA weights are what the original project samples with.
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
279
"""Data utilities for token classification (NER): example/feature containers,
the example->feature conversion, and cached PyTorch / TensorFlow datasets."""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification.

    Attributes:
        guid: Unique id for the example.
        words: The words of the sequence.
        labels: The labels for each word; None for test examples.
    """

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of model-ready features; field names match model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    """Abstract task: subclasses provide file reading and the label set."""

    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize each example, align word labels to sub-tokens, add special
        tokens, and pad everything to ``max_seq_length``.

        Only the first sub-token of each word keeps the real label id; the
        rest receive ``pad_token_label_id`` so the loss ignores them.
        """
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # "type_ids" indicate first vs. second sequence; the [CLS] vector
            # is used as the "sentence vector" for classification tasks.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        """PyTorch dataset of InputFeatures, cached on disk per (split, tokenizer, length)."""

        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        # xlnet has a cls token at the end
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        # roberta uses an extra separator b/w pairs of sentences
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        """TensorFlow dataset of InputFeatures built via a generator pipeline."""

        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                # xlnet has a cls token at the end
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                # roberta uses an extra separator b/w pairs of sentences
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            # FIX: the corrupted source used ``tf.intaa`` (not a TF dtype);
            # inputs are int32, label ids int64 — TODO confirm against the
            # upstream transformers utils_ner implementation.
            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
279
1
"""Generate a markdown table of contents for all Python/notebook files under a
directory tree (TheAlgorithms-style DIRECTORY.md builder)."""
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield repo-relative paths of every .py/.ipynb file worth listing,
    skipping ``scripts``, hidden/underscore directories, and ``__init__.py``."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk does not descend into skipped directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                # NOTE: lstrip strips a *character set*; fine for the "./"
                # prefix produced by os.path.join(".", ...), but it would also
                # eat a leading dot from a hidden top-level file name.
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    """Markdown bullet prefix for nesting level ``i`` (level 0 is a heading)."""
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print section headers for every path component new in ``new_path``
    relative to ``old_path``; return ``new_path`` as the next comparison base."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """Print the full markdown listing: one bullet per file, grouped by path."""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        # FIX: the corrupted source printed the literal "(unknown)" and left
        # the computed ``filename`` unused; restore the link text.
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
279
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[int] = SpeechTaTokenizer lowerCamelCase_ : int = False lowerCamelCase_ : Dict = True def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case_ : Tuple = SpeechTaTokenizer(__magic_name__ ) snake_case_ : Any = AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ ) snake_case_ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Dict = '''this is a test''' snake_case_ : int = '''this is a test''' return input_text, output_text def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> List[Any]: '''simple docstring''' snake_case_ , snake_case_ : int = self.get_input_output_texts(__magic_name__ ) snake_case_ : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) snake_case_ : Any = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) return text, ids def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[str] = '''<pad>''' snake_case_ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) 
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__magic_name__ ) , 81 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : int = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) ) snake_case_ : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': 
'''<<<<<|||>|>>>>|>'''} snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Dict = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) ) snake_case_ : Tuple = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> List[str]: '''simple docstring''' pass def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.get_tokenizer() snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, 
'''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ ) # fmt: off self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off snake_case_ : List[Any] = { '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 
14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
279
1
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=False , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ) -> str: '''simple docstring''' snake_case_ : List[str] = parent snake_case_ : List[Any] = batch_size snake_case_ : int = seq_length snake_case_ : Any = is_training snake_case_ : List[str] = use_input_mask snake_case_ : Tuple = use_token_type_ids snake_case_ : Optional[int] = use_labels snake_case_ : Tuple = vocab_size snake_case_ : int = hidden_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : Optional[Any] = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Tuple = hidden_dropout_prob snake_case_ : str = attention_probs_dropout_prob snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_size snake_case_ : Any = type_sequence_label_size snake_case_ : int = initializer_range snake_case_ : Optional[Any] = num_labels snake_case_ : Optional[int] = num_choices snake_case_ : Tuple = scope 
def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : List[Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : Any = None if self.use_token_type_ids: snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ : Dict = None snake_case_ : Any = None snake_case_ : Dict = None if self.use_labels: snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , use_stable_embedding=__magic_name__ , ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' snake_case_ : Any = OpenLlamaModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : List[str] = model(__magic_name__ , attention_mask=__magic_name__ ) snake_case_ : Any = model(__magic_name__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> int: '''simple docstring''' snake_case_ : Any = True snake_case_ : Optional[int] = OpenLlamaModel(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : Optional[int] = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , ) snake_case_ : Optional[Any] = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , ) snake_case_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> str: '''simple docstring''' snake_case_ : List[Any] = OpenLlamaForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Any: '''simple docstring''' snake_case_ : int = True snake_case_ : str = True snake_case_ : Tuple = OpenLlamaForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # first forward pass snake_case_ : Tuple = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , 
encoder_attention_mask=__magic_name__ , use_cache=__magic_name__ , ) snake_case_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case_ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case_ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case_ : Tuple = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , output_hidden_states=__magic_name__ , )['''hidden_states'''][0] snake_case_ : Any = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , output_hidden_states=__magic_name__ , )['''hidden_states'''][0] # select random slice snake_case_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ : int = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case_ : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Tuple = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) : Tuple = config_and_inputs snake_case_ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( _a, _a, _a, unittest.TestCase ): lowerCamelCase_ : str = ( (OpenLlamaModel, OpenLlamaForCausalLM, 
OpenLlamaForSequenceClassification) if is_torch_available() else () ) lowerCamelCase_ : Any = (OpenLlamaForCausalLM,) if is_torch_available() else () lowerCamelCase_ : List[str] = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ : str = False lowerCamelCase_ : Dict = False def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : List[Any] = OpenLlamaModelTester(self ) snake_case_ : Tuple = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ : Dict = type self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Any = 3 snake_case_ : Dict = input_dict['''input_ids'''] snake_case_ : List[str] = input_ids.ne(1 ).to(__magic_name__ ) snake_case_ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case_ : List[str] = OpenLlamaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, 
self.model_tester.num_labels) ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = 3 snake_case_ : Tuple = '''single_label_classification''' snake_case_ : Optional[Any] = input_dict['''input_ids'''] snake_case_ : str = input_ids.ne(1 ).to(__magic_name__ ) snake_case_ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case_ : List[Any] = OpenLlamaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[Any] = 3 snake_case_ : Any = '''multi_label_classification''' snake_case_ : Union[str, Any] = input_dict['''input_ids'''] snake_case_ : Tuple = input_ids.ne(1 ).to(__magic_name__ ) snake_case_ : Union[str, Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case_ : int = OpenLlamaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : List[str] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' ) def lowerCamelCase (self ) -> Any: '''simple docstring''' pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ , snake_case_ : str = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Union[str, Any] = ids_tensor([1, 10] , config.vocab_size ) snake_case_ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case_ : Optional[Any] = OpenLlamaModel(__magic_name__ ) original_model.to(__magic_name__ ) original_model.eval() snake_case_ : Any = original_model(__magic_name__ ).last_hidden_state snake_case_ : List[Any] = original_model(__magic_name__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case_ : Any = {'''type''': scaling_type, '''factor''': 10.0} snake_case_ : List[str] = OpenLlamaModel(__magic_name__ ) scaled_model.to(__magic_name__ ) scaled_model.eval() snake_case_ : Dict = scaled_model(__magic_name__ ).last_hidden_state snake_case_ : int = scaled_model(__magic_name__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) )
279
def _print_dist(dist, v):
    """Pretty-print the shortest-path matrix.

    Unreachable pairs (still infinity) print as ``INF``; reachable distances
    are truncated to ``int`` for compact, tab-separated display.
    """
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via Floyd–Warshall in O(v**3) time.

    :param graph: ``v x v`` adjacency matrix; ``float("inf")`` marks a missing edge.
    :param v: number of vertices.
    :returns: tuple ``(dist, v)`` where ``dist[i][j]`` is the shortest distance
        from vertex ``i`` to vertex ``j``. The input ``graph`` is not modified.
    """
    # Work on a copy of the adjacency matrix so the caller's graph is untouched.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # Relax every pair (i, j) through each candidate intermediate vertex k.
    for k in range(v):
        for i in range(v):
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within range(v);
    # failure to follow this will result in an IndexError.
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

# Example session:
#   Enter number of vertices: 3
#   Enter number of edges: 2
#   Edge 1 -> source 1, destination 2, weight 2
#   Edge 2 -> source 2, destination 1, weight 1
# Expected matrix:
#   0    INF  INF
#   INF  0    2
#   INF  1    0
279
1
import cmath
import math


def lowerCamelCase_(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """Calculate the apparent power S = V * I in an AC circuit.

    :param voltage: RMS voltage magnitude.
    :param current: RMS current magnitude.
    :param voltage_angle: phase angle of the voltage, in degrees.
    :param current_angle: phase angle of the current, in degrees.
    :returns: apparent power as a complex number (real part corresponds to
        active power, imaginary part to reactive power).

    The original signature repeated one parameter name four times, which is a
    SyntaxError in Python; the four distinct parameters are restored here.
    """
    # cmath.rect expects the phase angle in radians.
    voltage_rad = math.radians(voltage_angle)
    current_rad = math.radians(current_angle)

    # Convert the polar (magnitude, angle) forms to rectangular complex numbers.
    voltage_rect = cmath.rect(voltage, voltage_rad)
    current_rect = cmath.rect(current, current_rad)

    # NOTE(review): strict phasor apparent power is V * conj(I); this keeps the
    # original direct-product convention to preserve existing behavior.
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


# NOTE(review): this file has been mangled by an identifier-rewriting pass.
# Every class is named __lowerCAmelCase (later definitions shadow earlier
# ones), every test method is named lowerCamelCase (so only the last survives
# on the class), several signatures repeat the parameter name __magic_name__
# (a SyntaxError in Python), and method bodies still reference the original
# identifiers (OnnxExportTestCase, model, tokenizer, path, quantized_path,
# shapes, tokens, ...) that no longer exist. The comments below document the
# evident intent; the original identifiers must be restored before this
# module can import or run.


# Dummy model whose forward signature takes the ONNX-exportable arguments in
# contiguous order (input_ids, token_type_ids, attention_mask) -- used to
# exercise ensure_valid_input() below.
class __lowerCAmelCase :
    # NOTE(review): duplicate parameter names -> SyntaxError as written.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
        '''Stub forward(input_ids, token_type_ids, attention_mask); returns nothing.'''
        return None


# Dummy model with a non-contiguous signature (an extra argument interleaved
# between the tokenizer-provided ones), e.g. GPT-2's "past".
class __lowerCAmelCase :
    # NOTE(review): duplicate parameter names -> SyntaxError as written.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
        '''Stub forward(input_ids, some_other_args, token_type_ids, attention_mask).'''
        return None


class __lowerCAmelCase ( unittest.TestCase ):
    # (model_name, model_kwargs) pairs exported in the tests below.
    # NOTE(review): originally OnnxExportTestCase.MODEL_TO_TEST -- the loops
    # below still reference that name.
    lowerCamelCase_ : Dict = [
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Export every test model through the TensorFlow path at opset 12.'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ )

    @require_torch
    @slow
    def lowerCamelCase (self ) -> int:
        '''Export every test model through the PyTorch path at opset 12.'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ )

    @require_torch
    @slow
    def lowerCamelCase (self ) -> int:
        '''Export a freshly built (unsaved-to-hub) BERT model/tokenizer pair.'''
        from transformers import BertModel

        # Minimal vocabulary: the special tokens plus a few words.
        snake_case_ : str = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
        with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
            vocab_file.write('''\n'''.join(__magic_name__ ) )
            vocab_file.flush()
            snake_case_ : Optional[Any] = BertTokenizerFast(vocab_file.name )

        with TemporaryDirectory() as bert_save_dir:
            snake_case_ : str = BertModel(BertConfig(vocab_size=len(__magic_name__ ) ) )
            model.save_pretrained(__magic_name__ )
            self._test_export(__magic_name__ , '''pt''' , 12 , __magic_name__ )

    @require_tf
    @slow
    def lowerCamelCase (self ) -> Tuple:
        '''Export via TF then quantize; the quantized file must not grow.'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            snake_case_ : Tuple = self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ )
            snake_case_ : List[str] = quantize(Path(__magic_name__ ) )

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )

    @require_torch
    @slow
    def lowerCamelCase (self ) -> Any:
        '''Export via PyTorch then quantize; the quantized file must not grow.'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            snake_case_ : Any = self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ )
            snake_case_ : Any = quantize(__magic_name__ )

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Tuple:
        '''Helper: run convert() into a temp path and return it; fail on any exception.'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                snake_case_ : List[str] = Path(__magic_name__ ).joinpath('''model.onnx''' )

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )

            return path
        except Exception as e:
            self.fail(__magic_name__ )

    @require_torch
    @require_tokenizers
    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        '''Check dynamic-axis inference on a tiny PyTorch BERT.'''
        from transformers import BertModel

        snake_case_ : Optional[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        snake_case_ : int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''pt''' )

    @require_tf
    @require_tokenizers
    @slow
    def lowerCamelCase (self ) -> List[str]:
        '''Check dynamic-axis inference on a tiny TensorFlow BERT.'''
        from transformers import TFBertModel

        snake_case_ : Any = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        snake_case_ : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''tf''' )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
        '''Helper: infer_shapes() must expose the 3 inputs + 2 outputs with batch/sequence dynamic axes.'''
        snake_case_ : Tuple = FeatureExtractionPipeline(__magic_name__ , __magic_name__ )

        snake_case_ : Optional[int] = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = infer_shapes(__magic_name__ , __magic_name__ )

        # Assert all variables are present
        self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , __magic_name__ )
        self.assertSequenceEqual(variable_names[3:] , __magic_name__ )

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
        self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )

    def lowerCamelCase (self ) -> Optional[int]:
        '''ensure_valid_input() must reorder/filter tokenizer outputs to match the model signature.'''
        snake_case_ : Tuple = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        snake_case_ : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        snake_case_ , snake_case_ : Tuple = ensure_valid_input(FuncContiguousArgs() , __magic_name__ , __magic_name__ )

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__magic_name__ ) , 3 )

        # Should have exactly the same input names
        self.assertEqual(set(__magic_name__ ) , set(__magic_name__ ) )

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__magic_name__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        snake_case_ , snake_case_ : Dict = ensure_valid_input(FuncNonContiguousArgs() , __magic_name__ , __magic_name__ )

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__magic_name__ ) , 1 )
        self.assertEqual(len(__magic_name__ ) , 1 )

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
        self.assertEqual(ordered_input_names[0] , '''input_ids''' )

    def lowerCamelCase (self ) -> Any:
        '''generate_identified_filename() must insert the suffix before the extension.'''
        snake_case_ : Optional[int] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
        self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
279
1
from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


# NOTE(review): this file has been mangled by an identifier-rewriting pass.
# Both classes are named __lowerCAmelCase, all methods lowerCamelCase (later
# definitions shadow earlier ones), __init__ repeats the parameter name
# __magic_name__ (a SyntaxError), assignment targets were rewritten to
# snake_case_, and bodies still reference the original names (parent,
# batch_size, input_mask, config, BlipTextModelTester, _a = TFModelTesterMixin,
# ...). The comments below record the evident intent; the original
# identifiers must be restored before this module can import or run.


# Helper that builds a tiny BlipText config plus random inputs for the tests.
class __lowerCAmelCase :
    # NOTE(review): duplicate __magic_name__ parameters -> SyntaxError as
    # written. The defaults map, in order, to: batch_size=12, seq_length=7,
    # is_training, use_input_mask, use_labels, vocab_size=99, hidden_size=32,
    # projection_dim=32, num_hidden_layers=2, num_attention_heads=4,
    # intermediate_size=37, dropout=0.1, attention_dropout=0.1,
    # max_position_embeddings=512, initializer_range=0.02, bos_token_id=0,
    # scope=None (judging by the attribute assignments below).
    def __init__(self , __magic_name__ , __magic_name__=12 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=32 , __magic_name__=2 , __magic_name__=4 , __magic_name__=37 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=0.02 , __magic_name__=0 , __magic_name__=None , ) -> Tuple:
        '''Store the model/test hyper-parameters on the tester instance.'''
        snake_case_ : str = parent
        snake_case_ : Tuple = batch_size
        snake_case_ : List[str] = seq_length
        snake_case_ : Optional[Any] = is_training
        snake_case_ : int = use_input_mask
        snake_case_ : Optional[int] = use_labels
        snake_case_ : int = vocab_size
        snake_case_ : List[Any] = hidden_size
        snake_case_ : Tuple = projection_dim
        snake_case_ : List[Any] = num_hidden_layers
        snake_case_ : Union[str, Any] = num_attention_heads
        snake_case_ : Optional[int] = intermediate_size
        snake_case_ : List[str] = dropout
        snake_case_ : List[str] = attention_dropout
        snake_case_ : Optional[Any] = max_position_embeddings
        snake_case_ : int = initializer_range
        snake_case_ : Dict = scope
        snake_case_ : List[Any] = bos_token_id

    def lowerCamelCase (self ) -> Optional[int]:
        '''Build (config, input_ids, attention_mask) with a random valid mask prefix per row.'''
        snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : Union[str, Any] = None
        if self.use_input_mask:
            snake_case_ : int = random_attention_mask([self.batch_size, self.seq_length] )

        if input_mask is not None:
            snake_case_ : List[str] = input_mask.numpy()
            snake_case_ , snake_case_ : Optional[int] = input_mask.shape
            # For each row, pick a random cut point: ones before it, zeros after,
            # so every sample has a contiguous attended prefix.
            snake_case_ : Any = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(__magic_name__ ):
                snake_case_ : Tuple = 1
                snake_case_ : List[str] = 0

        snake_case_ : Optional[Any] = self.get_config()

        return config, input_ids, tf.convert_to_tensor(__magic_name__ )

    def lowerCamelCase (self ) -> Optional[Any]:
        '''Return a BlipTextConfig built from the stored hyper-parameters.'''
        return BlipTextConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            projection_dim=self.projection_dim ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            dropout=self.dropout ,
            attention_dropout=self.attention_dropout ,
            max_position_embeddings=self.max_position_embeddings ,
            initializer_range=self.initializer_range ,
            bos_token_id=self.bos_token_id ,
        )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
        '''Instantiate TFBlipTextModel and check output shapes with and without the mask.'''
        snake_case_ : List[str] = TFBlipTextModel(config=__magic_name__ )
        snake_case_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ , training=__magic_name__ )
        snake_case_ : Optional[int] = model(__magic_name__ , training=__magic_name__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Return (config, inputs_dict) in the shape the common test mixin expects.'''
        snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ : Dict = config_and_inputs
        snake_case_ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_tf
class __lowerCAmelCase ( _a, unittest.TestCase ):
    # NOTE(review): _a was presumably TFModelTesterMixin -- confirm.
    # Class-level flags: all_model_classes, then the three common-test
    # switches (fx/onnx/attention-related) disabled.
    lowerCamelCase_ : Optional[int] = (TFBlipTextModel,) if is_tf_available() else ()
    lowerCamelCase_ : Optional[int] = False
    lowerCamelCase_ : str = False
    lowerCamelCase_ : Optional[Any] = False

    def lowerCamelCase (self ) -> Dict:
        '''setUp: build the model tester and the config tester.'''
        snake_case_ : Tuple = BlipTextModelTester(self )
        snake_case_ : List[str] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Run the shared BlipTextConfig sanity tests.'''
        self.config_tester.run_common_tests()

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Forward-pass shape test via the model tester.'''
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__ )

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Intentionally skipped common test (no-op).'''
        pass

    def lowerCamelCase (self ) -> str:
        '''Intentionally skipped common test (no-op).'''
        pass

    @unittest.skip(reason='''Blip does not use inputs_embeds''' )
    def lowerCamelCase (self ) -> Dict:
        '''Skipped: Blip has no inputs_embeds path.'''
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
    def lowerCamelCase (self ) -> int:
        '''Skipped: not registered in MODEL_MAPPING.'''
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
    def lowerCamelCase (self ) -> Optional[Any]:
        '''Skipped: not registered in MODEL_MAPPING.'''
        pass

    @slow
    def lowerCamelCase (self ) -> Tuple:
        '''Load the first published checkpoint from the hub and assert it exists.'''
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : Union[str, Any] = TFBlipTextModel.from_pretrained(__magic_name__ )
            self.assertIsNotNone(__magic_name__ )

    def lowerCamelCase (self , __magic_name__=True ) -> Optional[Any]:
        '''PT/TF equivalence, tolerating missing keys (Blip-specific override).'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=__magic_name__ )
279
# Conversion factors from each supported unit to joules.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def lowerCamelCase_(value: float, from_type: str, to_type: str) -> float:
    """Convert an amount of energy between the units in ENERGY_CONVERSION.

    :param value: amount of energy expressed in ``from_type`` units
    :param from_type: source unit, a key of ENERGY_CONVERSION
    :param to_type: target unit, a key of ENERGY_CONVERSION
    :return: the equivalent amount expressed in ``to_type`` units
    :raises ValueError: if either unit name is not a key of ENERGY_CONVERSION
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert to joules, then from joules to the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
1
from jiwer import compute_measures

import datasets


# NOTE(review): an identifier-rewriting pass renamed the three module
# constants below to the same name lowerCAmelCase_, so each assignment
# clobbers the previous one, while the decorator and _info() still reference
# the original names _CITATION, _DESCRIPTION and _KWARGS_DESCRIPTION --
# as written this module raises NameError at import time. The original
# names must be restored. (The multi-line string contents were reflowed
# from a collapsed one-line extraction; verify against upstream.)
lowerCAmelCase_ = '''\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''

lowerCAmelCase_ = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''

lowerCAmelCase_ = '''
Compute WER score of transcribed segments against references.
Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
    (float): the word error rate
Examples:
    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    # Word-error-rate metric backed by jiwer.compute_measures.
    # NOTE(review): class/method names were mangled; originally this was the
    # metric class (with _info and _compute methods).

    def lowerCamelCase (self ) -> Tuple:
        '''Describe the metric: string prediction/reference pairs, jiwer codebase.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) ,
            codebase_urls=['''https://github.com/jitsi/jiwer/'''] ,
            reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
            ] ,
        )

    # NOTE(review): duplicate __magic_name__ parameters -> SyntaxError as
    # written; the body still references the original names (predictions,
    # references, concatenate_texts, incorrect, total, measures).
    def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=False ) -> int:
        '''Compute WER either over the concatenation of all texts or by
        accumulating per-pair edit counts (substitutions/deletions/insertions
        over the reference length) and dividing once at the end.'''
        if concatenate_texts:
            return compute_measures(__magic_name__ , __magic_name__ )["wer"]
        else:
            snake_case_ : Optional[int] = 0
            snake_case_ : Any = 0
            for prediction, reference in zip(__magic_name__ , __magic_name__ ):
                snake_case_ : Tuple = compute_measures(__magic_name__ , __magic_name__ )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
279
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


# NOTE(review): an identifier-rewriting pass renamed the module-level
# constants below (logger, _CITATION, _DESCRIPTION, _KWARGS_DESCRIPTION,
# CHECKPOINT_URLS) all to lowerCAmelCase_, so each assignment clobbers the
# previous one while the class body still references the original names --
# as written this module raises NameError at import time. The original names
# must be restored. (Multi-line string contents were reflowed from a
# collapsed one-line extraction; verify against upstream.)
lowerCAmelCase_ = datasets.logging.get_logger(__name__)

lowerCAmelCase_ = '''\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
'''

lowerCAmelCase_ = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''

lowerCAmelCase_ = '''
BLEURT score.
Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
    \'scores\': List of scores.
Examples:
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
'''

# Download URLs for every published BLEURT checkpoint, keyed by config name.
lowerCAmelCase_ = {
    '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
    '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
    '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
    '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
    '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
    '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
    '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
    '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
    '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
    '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    # BLEURT metric: downloads a learnt checkpoint and scores candidate
    # sentences against references with the bleurt scorer.

    def lowerCamelCase (self ) -> Optional[int]:
        '''Describe the metric: string prediction/reference pairs, BLEURT codebase.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage='''https://github.com/google-research/bleurt''' ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) ,
            codebase_urls=['''https://github.com/google-research/bleurt'''] ,
            reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] ,
        )

    # NOTE(review): __magic_name__ was presumably dl_manager (the body calls
    # dl_manager.download_and_extract), and the snake_case_ targets were
    # checkpoint_name / self.scorer -- confirm against upstream.
    def lowerCamelCase (self , __magic_name__ ) -> List[Any]:
        '''Resolve self.config_name to a checkpoint (case-insensitively),
        download/extract it, and build the BleurtScorer.'''
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').'''
            )
            snake_case_ : Dict = '''bleurt-base-128'''

        if self.config_name.lower() in CHECKPOINT_URLS:
            snake_case_ : Optional[int] = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            snake_case_ : Union[str, Any] = self.config_name.upper()
        else:
            raise KeyError(
                f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )

        # download the model checkpoint specified by self.config_name and set up the scorer
        snake_case_ : Any = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        snake_case_ : Dict = score.BleurtScorer(os.path.join(__magic_name__ , __magic_name__ ) )

    # NOTE(review): duplicate __magic_name__ parameters -> SyntaxError as
    # written; originally (predictions, references).
    def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
        '''Score each candidate against its reference; returns {"scores": [...]}.'''
        snake_case_ : Dict = self.scorer.score(references=__magic_name__ , candidates=__magic_name__ )
        return {"scores": scores}
279
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" if "resnet-50" in model_name: snake_case_ : List[str] = ResNetConfig.from_pretrained('''microsoft/resnet-50''' ) elif "resnet-101" in model_name: snake_case_ : List[Any] = ResNetConfig.from_pretrained('''microsoft/resnet-101''' ) else: raise ValueError('''Model name should include either resnet50 or resnet101''' ) snake_case_ : Union[str, Any] = DetrConfig(use_timm_backbone=_UpperCamelCase , backbone_config=_UpperCamelCase ) # set label attributes snake_case_ : List[Any] = '''panoptic''' in model_name if is_panoptic: snake_case_ : Any = 250 else: snake_case_ : Any = 91 snake_case_ : Dict = '''huggingface/label-files''' snake_case_ : Any = '''coco-detection-id2label.json''' snake_case_ : Optional[Any] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) snake_case_ : List[str] = {int(_UpperCamelCase ): v for k, v in idalabel.items()} snake_case_ : Tuple = idalabel snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()} return config, is_panoptic def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple: """simple docstring""" snake_case_ : Any = [] # stem # fmt: off rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') ) rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') ) rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') ) 
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') ) rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''', ) ) 
rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''', ) ) rename_keys.append( ( f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''', f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', 
f'''encoder.layers.{i}.final_layer_norm.bias''') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''', ) ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') ) # convolutional projection + query embeddings + layernorm of 
decoder + class and bounding box heads rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ] ) return rename_keys def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: """simple docstring""" snake_case_ : Optional[int] = state_dict.pop(_UpperCamelCase ) snake_case_ : List[str] = val def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> Optional[int]: """simple docstring""" snake_case_ : List[Any] = '''''' if is_panoptic: snake_case_ : Optional[int] = '''detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case_ : Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) snake_case_ : Tuple = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case_ : List[str] = in_proj_weight[:256, :] snake_case_ : Dict = in_proj_bias[:256] snake_case_ : Dict = in_proj_weight[256:512, :] snake_case_ : Union[str, 
Any] = in_proj_bias[256:512] snake_case_ : int = in_proj_weight[-256:, :] snake_case_ : Tuple = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention snake_case_ : List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) snake_case_ : Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case_ : List[str] = in_proj_weight[:256, :] snake_case_ : str = in_proj_bias[:256] snake_case_ : int = in_proj_weight[256:512, :] snake_case_ : str = in_proj_bias[256:512] snake_case_ : List[str] = in_proj_weight[-256:, :] snake_case_ : List[str] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention snake_case_ : str = state_dict.pop( f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) snake_case_ : Optional[int] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict snake_case_ : Optional[int] = in_proj_weight_cross_attn[:256, :] snake_case_ : List[str] = in_proj_bias_cross_attn[:256] snake_case_ : List[Any] = in_proj_weight_cross_attn[256:512, :] snake_case_ : Union[str, Any] = in_proj_bias_cross_attn[256:512] snake_case_ : Optional[int] = in_proj_weight_cross_attn[-256:, :] snake_case_ : Optional[Any] = in_proj_bias_cross_attn[-256:] def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ : Union[str, Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=None , 
_UpperCamelCase=False ) -> Union[str, Any]: """simple docstring""" snake_case_ , snake_case_ : int = get_detr_config(_UpperCamelCase ) # load original model from torch hub snake_case_ : Tuple = { '''detr-resnet-50''': '''detr_resnet50''', '''detr-resnet-101''': '''detr_resnet101''', } logger.info(f'''Converting model {model_name}...''' ) snake_case_ : Optional[int] = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=_UpperCamelCase ).eval() snake_case_ : Union[str, Any] = detr.state_dict() # rename keys for src, dest in create_rename_keys(_UpperCamelCase ): if is_panoptic: snake_case_ : List[Any] = '''detr.''' + src rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(_UpperCamelCase , is_panoptic=_UpperCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case_ : Tuple = '''detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('''detr''' ) and not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ) ): snake_case_ : List[str] = state_dict.pop(_UpperCamelCase ) snake_case_ : List[Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case_ : int = state_dict.pop(_UpperCamelCase ) snake_case_ : List[Any] = val elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ): continue else: snake_case_ : Tuple = state_dict.pop(_UpperCamelCase ) snake_case_ : Optional[int] = val else: if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): snake_case_ : str = state_dict.pop(_UpperCamelCase ) snake_case_ : Tuple = val # finally, create HuggingFace model and load state dict snake_case_ : List[str] = DetrForSegmentation(_UpperCamelCase ) if is_panoptic else 
DetrForObjectDetection(_UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) model.eval() # verify our conversion on an image snake_case_ : Optional[Any] = '''coco_panoptic''' if is_panoptic else '''coco_detection''' snake_case_ : Tuple = DetrImageProcessor(format=_UpperCamelCase ) snake_case_ : int = processor(images=prepare_img() , return_tensors='''pt''' ) snake_case_ : Dict = encoding['''pixel_values'''] snake_case_ : Optional[Any] = detr(_UpperCamelCase ) snake_case_ : List[str] = model(_UpperCamelCase ) assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) processor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info('''Uploading PyTorch model and image processor to the hub...''' ) model.push_to_hub(f'''nielsr/{model_name}''' ) processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''detr-resnet-50''', type=str, choices=['''detr-resnet-50''', '''detr-resnet-101'''], help='''Name of the DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''') lowerCAmelCase_ = parser.parse_args() convert_detr_checkpoint(args.model_name, 
args.pytorch_dump_folder_path, args.push_to_hub)
279
# ---------------------------------------------------------------------------
# Image post-processing utilities (diffusers-style):
#   * builds a name -> PIL resampling-filter map, using `Image.Resampling.*`
#     on Pillow >= 9.1 (where the top-level constants were deprecated) and the
#     legacy `PIL.Image.*` constants otherwise;
#   * first function: maps image tensors from [-1, 1] to [0, 1], moves them to
#     CPU in channels-last layout, and converts to PIL via the second function;
#   * second function: converts a (N)HWC uint8-able numpy array to a list of
#     PIL images, with a special case for single-channel (grayscale, mode "L").
# NOTE(review): this chunk is a collapsed one-line dump; the code below is
# kept byte-identical and only this header comment was added.
# ---------------------------------------------------------------------------
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''): lowerCAmelCase_ = { '''linear''': PIL.Image.Resampling.BILINEAR, '''bilinear''': PIL.Image.Resampling.BILINEAR, '''bicubic''': PIL.Image.Resampling.BICUBIC, '''lanczos''': PIL.Image.Resampling.LANCZOS, '''nearest''': PIL.Image.Resampling.NEAREST, } else: lowerCAmelCase_ = { '''linear''': PIL.Image.LINEAR, '''bilinear''': PIL.Image.BILINEAR, '''bicubic''': PIL.Image.BICUBIC, '''lanczos''': PIL.Image.LANCZOS, '''nearest''': PIL.Image.NEAREST, } def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : Dict = (images / 2 + 0.5).clamp(0 , 1 ) snake_case_ : Dict = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() snake_case_ : int = numpy_to_pil(_UpperCamelCase ) return images def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" if images.ndim == 3: snake_case_ : Optional[Any] = images[None, ...] snake_case_ : Any = (images * 255).round().astype('''uint8''' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images snake_case_ : str = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images] else: snake_case_ : List[Any] = [Image.fromarray(_UpperCamelCase ) for image in images] return pil_images
279
1
# ---------------------------------------------------------------------------
# ByT5 tokenizer test suite (depends on `transformers`): exercises byte-level
# encoding/decoding (unicode, accents), batch padding/attention masks, target
# tokenization, save/reload round-trips with added special tokens, and the
# special-token-map / tokenizer-config JSON handling.
# NOTE(review): this chunk is a collapsed multi-row dump and several row
# boundaries fall mid-statement; the code below is kept byte-identical and
# only this header comment was added.
# ---------------------------------------------------------------------------
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase_ = '''pt''' elif is_tf_available(): lowerCAmelCase_ = '''tf''' else: lowerCAmelCase_ = '''jax''' class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[int] = ByTaTokenizer lowerCamelCase_ : str = False def lowerCamelCase (self ) -> Tuple: '''simple docstring''' super().setUp() snake_case_ : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' return ByTaTokenizer.from_pretrained('''google/byt5-small''' ) def lowerCamelCase (self , **__magic_name__ ) -> ByTaTokenizer: '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> Tuple[str, list]: '''simple docstring''' snake_case_ : List[Any] = [] for i in range(len(__magic_name__ ) ): try: snake_case_ : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=__magic_name__ ) except UnicodeDecodeError: pass toks.append((i, tok) ) snake_case_ : Union[str, Any] = list(filter(lambda __magic_name__ : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __magic_name__ ) ) snake_case_ : Optional[Any] = list(filter(lambda __magic_name__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__magic_name__ ) , __magic_name__ ) ) if max_length is not None and len(__magic_name__ ) > max_length: snake_case_ : Union[str, Any] = toks[:max_length] if min_length is not None and len(__magic_name__ ) < min_length and len(__magic_name__ ) > 0: while len(__magic_name__ ) < min_length: snake_case_ : int = toks + toks # toks_str =
[t[1] for t in toks] snake_case_ : Any = [t[0] for t in toks] # Ensure consistency snake_case_ : List[str] = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) if " " not in output_txt and len(__magic_name__ ) > 1: snake_case_ : Any = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__magic_name__ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__magic_name__ ) ) if with_prefix_space: snake_case_ : Optional[Any] = ''' ''' + output_txt snake_case_ : Optional[int] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) return output_txt, output_ids def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : List[str] = self.ta_base_tokenizer snake_case_ : int = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] ) snake_case_ : Optional[Any] = tokenizer(['''hi''', '''I went to the gym''', ''''''] ) self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[Any] = self.ta_base_tokenizer snake_case_ : Union[str, Any] = '''Unicode €.''' snake_case_ : List[str] = tokenizer(__magic_name__ ) snake_case_ : str = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['''input_ids'''] , __magic_name__ ) # decoding snake_case_ : Any = tokenizer.decode(__magic_name__ ) self.assertEqual(__magic_name__ , '''Unicode €.</s>''' ) snake_case_ : List[Any] = tokenizer('''e è é ê ë''' ) snake_case_ : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['''input_ids'''] , __magic_name__ ) # decoding snake_case_ : Optional[int] = tokenizer.decode(__magic_name__ ) self.assertEqual(__magic_name__ , '''e è é ê ë</s>''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê 
ë</s>''' ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = self.ta_base_tokenizer snake_case_ : str = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off snake_case_ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on snake_case_ : Optional[Any] = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ ) self.assertIsInstance(__magic_name__ , __magic_name__ ) if FRAMEWORK != "jax": snake_case_ : Union[str, Any] = list(batch.input_ids.numpy()[0] ) else: snake_case_ : str = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Dict = self.ta_base_tokenizer snake_case_ : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] snake_case_ : Optional[Any] = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , __magic_name__ ) self.assertIn('''attention_mask''' , __magic_name__ ) self.assertNotIn('''decoder_input_ids''' , __magic_name__ ) self.assertNotIn('''decoder_attention_mask''' , __magic_name__ ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.ta_base_tokenizer snake_case_ : Optional[int] = [ '''Summary of the text.''', '''Another summary.''', ] snake_case_ : str = tokenizer( text_target=__magic_name__ , max_length=32 , padding='''max_length''' , truncation=__magic_name__ , return_tensors=__magic_name__ ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def lowerCamelCase (self ) -> 
Optional[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = self.ta_base_tokenizer snake_case_ : List[str] = ['''A long paragraph for summarization. </s>'''] snake_case_ : Optional[Any] = ['''Summary of the text. </s>'''] # fmt: off snake_case_ : Dict = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] snake_case_ : str = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on snake_case_ : str = tokenizer(__magic_name__ , text_target=__magic_name__ ) self.assertEqual(__magic_name__ , batch['''input_ids'''][0] ) self.assertEqual(__magic_name__ , batch['''labels'''][0] ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test snake_case_ : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc snake_case_ : Tuple = tempfile.mkdtemp() snake_case_ : List[str] = ''' He is very happy, UNwant\u00E9d,running''' snake_case_ : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) snake_case_ : str = tokenizer.__class__.from_pretrained(__magic_name__ ) snake_case_ : Any = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) snake_case_ : Optional[int] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional 
tokens/etc snake_case_ : Optional[int] = tempfile.mkdtemp() snake_case_ : Dict = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) snake_case_ : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) snake_case_ : str = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) snake_case_ : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) snake_case_ : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) snake_case_ : List[str] = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: snake_case_ : str = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: snake_case_ : str = json.load(__magic_name__ ) snake_case_ : str = [F'''<extra_id_{i}>''' for i in range(125 )] snake_case_ : Optional[int] = added_tokens_extra_ids + [ 
'''an_additional_special_token''' ] snake_case_ : Optional[Any] = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(__magic_name__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files snake_case_ : List[Any] = tokenizer_class.from_pretrained( __magic_name__ , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained snake_case_ : Dict = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__magic_name__ )] snake_case_ : int = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) 
if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) snake_case_ : Dict = tokenizer_class.from_pretrained(__magic_name__ ) self.assertTrue(tokenizer.decode([255] ) == '''''' ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> List[str]: '''simple docstring''' pass def lowerCamelCase (self ) -> Any: '''simple docstring''' pass def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = self.get_tokenizers(fast=__magic_name__ , do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : Dict = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>'''] snake_case_ : Optional[int] = tokenizer.convert_tokens_to_string(__magic_name__ ) self.assertIsInstance(__magic_name__ , __magic_name__ ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : List[Any] = [ '''bos_token''', '''eos_token''', '''unk_token''', '''sep_token''', '''pad_token''', '''cls_token''', '''mask_token''', ] snake_case_ : List[str] = 0 snake_case_ : Any = tokenizer.convert_ids_to_tokens( __magic_name__ , skip_special_tokens=__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + '''_id''' , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + '''_id''' ) , __magic_name__ ) setattr(__magic_name__ , attr + '''_id''' , __magic_name__ 
) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + '''_id''' ) , __magic_name__ ) setattr(__magic_name__ , '''additional_special_tokens_ids''' , [] ) self.assertListEqual(getattr(__magic_name__ , '''additional_special_tokens''' ) , [] ) self.assertListEqual(getattr(__magic_name__ , '''additional_special_tokens_ids''' ) , [] ) setattr(__magic_name__ , '''additional_special_tokens_ids''' , [token_id_to_test_setters] ) self.assertListEqual(getattr(__magic_name__ , '''additional_special_tokens''' ) , [token_to_test_setters] ) self.assertListEqual(getattr(__magic_name__ , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
279
# ---------------------------------------------------------------------------
# BioGptTokenizer unit tests (depends on `transformers`): writes a toy BPE
# vocab/merges pair to disk in setUp, verifies tokenize() produces the
# expected subword split and convert_tokens_to_ids maps it (with <unk>
# fallback), and (slow) checks build_inputs_with_special_tokens prepends the
# BOS id for single and paired sequences against the hub checkpoint.
# NOTE(review): collapsed multi-row dump; code kept byte-identical, only this
# header comment was added.
# ---------------------------------------------------------------------------
import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Any = BioGptTokenizer lowerCamelCase_ : Optional[Any] = False def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ : Optional[Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) snake_case_ : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(__magic_name__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' snake_case_ : str = '''lower newer''' snake_case_ : Dict = '''lower newer''' return input_text, output_text def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file ) snake_case_ : Union[str, Any] = '''lower''' snake_case_ : Optional[int] = ['''low''', '''er</w>'''] snake_case_ : Any = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) snake_case_ : Optional[int] = tokens + ['''<unk>'''] snake_case_ : List[str] = [14, 15, 20] 
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) @slow def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) snake_case_ : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__magic_name__ ) snake_case_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__magic_name__ ) snake_case_ : str = tokenizer.build_inputs_with_special_tokens(__magic_name__ ) snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
279
1
# ---------------------------------------------------------------------------
# Twitter timeline archiver (depends on `tweepy`): authenticates with the
# OAuth credentials above, pages backwards through a user's timeline 200
# tweets at a time using `max_id` to avoid duplicates, and writes
# id/created_at/text rows to new_<screen_name>_tweets.csv.
# NOTE(review): credentials are empty placeholders and must be filled in
# before running; collapsed one-line dump, code kept byte-identical.
# ---------------------------------------------------------------------------
import csv import tweepy # Twitter API credentials lowerCAmelCase_ = '''''' lowerCAmelCase_ = '''''' lowerCAmelCase_ = '''''' lowerCAmelCase_ = '''''' def lowerCamelCase_ ( _UpperCamelCase ) -> None: """simple docstring""" snake_case_ : Optional[Any] = tweepy.OAuthHandler(_UpperCamelCase , _UpperCamelCase ) auth.set_access_token(_UpperCamelCase , _UpperCamelCase ) snake_case_ : Optional[int] = tweepy.API(_UpperCamelCase ) # initialize a list to hold all the tweepy Tweets snake_case_ : Union[str, Any] = [] # make initial request for most recent tweets (200 is the maximum allowed count) snake_case_ : int = api.user_timeline(screen_name=_UpperCamelCase , count=200 ) # save most recent tweets alltweets.extend(_UpperCamelCase ) # save the id of the oldest tweet less one snake_case_ : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(_UpperCamelCase ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates snake_case_ : Dict = api.user_timeline( screen_name=_UpperCamelCase , count=200 , max_id=_UpperCamelCase ) # save most recent tweets alltweets.extend(_UpperCamelCase ) # update the id of the oldest tweet less one snake_case_ : Optional[Any] = alltweets[-1].id - 1 print(f'''...{len(_UpperCamelCase )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv snake_case_ : Tuple = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f: snake_case_ : Union[str, Any] = csv.writer(_UpperCamelCase ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(_UpperCamelCase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
279
from __future__ import annotations


def lowerCamelCase_(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem with the classic greedy algorithm.

    Items are considered in decreasing value/weight ratio; whole items are
    taken while they fit and the first item that does not fit is taken
    fractionally, which is optimal for the fractional variant.

    Args:
        value: value of each item.
        weight: weight of each item (assumed positive, same length as ``value``).
        capacity: total carrying capacity of the knapsack.

    Returns:
        ``(max_value, fractions)`` where ``fractions[i]`` is the fraction of
        item ``i`` placed in the knapsack (0, 1, or a value in between for at
        most one item).

    >>> value = [60, 100, 120]
    >>> weight = [10, 20, 30]
    >>> lowerCamelCase_(value, weight, 50)[0]
    240.0
    """
    # BUG FIX: the original signature repeated the same parameter name three
    # times (a SyntaxError) and the sort key referenced an unbound loop
    # variable with a mangled ``reverse=`` argument.  Restored distinct
    # parameters and the by-ratio descending sort.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # The whole item fits: take all of it.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that fills the remaining capacity.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
1
# ---------------------------------------------------------------------------
# Lazy-import package init for the Falcon model (transformers pattern):
# declares the import structure, exposes configuration unconditionally, gates
# the torch modeling classes behind an is_torch_available() check, and at
# runtime replaces the module with a _LazyModule so heavy imports are deferred
# until an attribute is actually accessed.
# NOTE(review): collapsed one-line dump; code kept byte-identical, only this
# header comment was added.
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase_ = { '''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FalconForCausalLM''', '''FalconModel''', '''FalconPreTrainedModel''', '''FalconForSequenceClassification''', '''FalconForTokenClassification''', '''FalconForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
279
# ---------------------------------------------------------------------------
# GLPNImageProcessor tests (depends on `transformers`): a tester helper holds
# the processor config (do_resize / size_divisor / do_rescale) and the test
# class checks property presence plus, for PIL, numpy, and torch inputs, that
# the processed pixel_values' height and width are multiples of size_divisor.
# GLPNImageProcessor does not support batching, so only image_inputs[0] is
# processed in each case.
# NOTE(review): collapsed multi-row dump; code kept byte-identical, only this
# header comment was added.
# ---------------------------------------------------------------------------
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=32 , __magic_name__=True , ) -> Dict: '''simple docstring''' snake_case_ : Tuple = parent snake_case_ : Union[str, Any] = batch_size snake_case_ : Union[str, Any] = num_channels snake_case_ : Optional[Any] = image_size snake_case_ : int = min_resolution snake_case_ : Any = max_resolution snake_case_ : Tuple = do_resize snake_case_ : str = size_divisor snake_case_ : Optional[Any] = do_rescale def lowerCamelCase (self ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[Any] = GLPNImageProcessor if is_vision_available() else None def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : str = GLPNImageProcessingTester(self ) @property def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) ) self.assertTrue(hasattr(__magic_name__ , '''size_divisor''' ) ) self.assertTrue(hasattr(__magic_name__ , '''resample''' ) ) self.assertTrue(hasattr(__magic_name__ , 
'''do_rescale''' ) ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) snake_case_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't 
support batching) snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
279
1
from __future__ import annotations


def lowerCamelCase_(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Solve the mass-action law n * p = n_i**2 for the one missing quantity.

    Exactly one of the three concentrations must be supplied as 0; that value
    is computed from the other two.

    Args:
        electron_conc: electron concentration (0 if unknown).
        hole_conc: hole concentration (0 if unknown).
        intrinsic_conc: intrinsic concentration (0 if unknown).

    Returns:
        A ``(name, value)`` tuple naming the computed quantity, or ``(-1, -1)``
        (unreachable given the zero-count check, kept for parity).

    Raises:
        ValueError: if the number of zero inputs is not exactly one, or if any
            concentration is negative.

    >>> lowerCamelCase_(25, 100, 0)
    ('intrinsic_conc', 50.0)
    """
    # BUG FIX: the original signature repeated the same parameter name three
    # times, which is a SyntaxError; distinct names restored from the body.
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        # n = n_i**2 / p
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    elif hole_conc == 0:
        # p = n_i**2 / n
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
279
1
"""Daily-data safety forecast: three predictors vote on the next value.

The obfuscated source left every function named ``lowerCamelCase_`` while the
``__main__`` block called ``linear_regression_prediction`` / ``sarimax_predictor``
/ ``support_vector_regressor`` / ``data_safety_checker`` (NameError), and the
function bodies referenced unbound names (``x``, ``beta``, ``model_fit``,
``regressor``, ``safe``, ...).  Names are restored so the call graph resolves.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Ordinary-least-squares forecast of the next user count.

    Builds the design matrix [1, date, match_count] and solves the normal
    equations beta = (X^T X)^-1 X^T y.
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    # NOTE: test_mtch[0] enters additively (not scaled by beta[2]) — kept as in
    # the original model; TODO confirm against the intended formula.
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Seasonal ARIMA forecast of the next user count, with match count as exog."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)  # weekly seasonality
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """RBF-kernel SVR forecast of the next user count."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return the lower outlier limit Q1 - 0.1*IQR.

    Note: sorts ``train_user`` in place.
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote on whether today's actual value is "safe".

    A vote counts as safe when it does not exceed the actual result and its
    magnitude is within 0.1 of the actual magnitude; otherwise it is unsafe.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data; pass the held-out scalar — comparing a
    # float against the raw one-element list would raise TypeError in Python 3
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    # fixed: the original print lacked the f-prefix, so {not_str} was printed literally
    print(f"Today's data is {not_str}safe.")
279
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# NOTE(review): HF Trainer benchmarking tool — a Tee stdout logger, helpers that
# rebuild/sanitize the benchmarked command line, per-variation run + averaging,
# and markdown/console report generation, driven by main() at the bottom.
# The source arrived identifier-obfuscated (repeated `_UpperCamelCase` parameters,
# `snake_case_` assignments whose original targets are referenced later), so the
# stored code is not runnable as-is; it is kept byte-identical here pending a
# verified restoration from the upstream script — TODO confirm.
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase_ = float('''nan''') class __lowerCAmelCase : def __init__(self , __magic_name__ ) -> int: '''simple docstring''' snake_case_ : List[Any] = sys.stdout snake_case_ : int = open(__magic_name__ , '''a''' ) def __getattr__(self , __magic_name__ ) -> Dict: '''simple docstring''' return getattr(self.stdout , __magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' self.stdout.write(__magic_name__ ) # strip tqdm codes self.file.write(re.sub(R'''^.*\r''' , '''''' , __magic_name__ , 0 , re.M ) ) def lowerCamelCase_ ( _UpperCamelCase=80 , _UpperCamelCase=False ) -> str: """simple docstring""" snake_case_ : str = [] # deal with critical env vars snake_case_ : int = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: snake_case_ : Optional[int] = os.environ.get(_UpperCamelCase , _UpperCamelCase ) if val is not None: cmd.append(f'''{key}={val}''' ) # python executable (not always needed if the script is executable) snake_case_ : Optional[int] = sys.executable if 
full_python_path else sys.executable.split('''/''' )[-1] cmd.append(_UpperCamelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes snake_case_ : Dict = [] snake_case_ : Dict = '''''' while len(_UpperCamelCase ) > 0: current_line += f'''{cmd.pop(0 )} ''' if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(_UpperCamelCase ) snake_case_ : List[Any] = '''''' return "\\\n".join(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]: """simple docstring""" snake_case_ : str = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd ) # remove --output_dir if any and set our own snake_case_ : Optional[Any] = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd ) args.base_cmd += f''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir snake_case_ : int = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , ) snake_case_ : Tuple = subprocess.run(_UpperCamelCase , capture_output=_UpperCamelCase , text=_UpperCamelCase ) if verbose: print('''STDOUT''' , result.stdout ) print('''STDERR''' , result.stderr ) # save the streams snake_case_ : Any = variation.replace(''' ''' , '''-''' ) with open(Path(_UpperCamelCase ) / f'''log.{prefix}.stdout.txt''' , '''w''' ) as f: f.write(result.stdout ) with open(Path(_UpperCamelCase ) / f'''log.{prefix}.stderr.txt''' , '''w''' ) as f: f.write(result.stderr ) 
if result.returncode != 0: if verbose: print('''failed''' ) return {target_metric_key: nan} with io.open(f'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f: snake_case_ : str = json.load(_UpperCamelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Tuple: """simple docstring""" snake_case_ : Tuple = [] snake_case_ : Any = [] snake_case_ : int = f'''{id}: {variation:<{longest_variation_len}}''' snake_case_ : Optional[Any] = f'''{preamble}: ''' snake_case_ : Optional[int] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(_UpperCamelCase ) , desc=_UpperCamelCase , leave=_UpperCamelCase ): snake_case_ : int = process_run_single( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ : List[str] = single_run_metrics[target_metric_key] if not math.isnan(_UpperCamelCase ): metrics.append(_UpperCamelCase ) results.append(_UpperCamelCase ) outcome += "✓" else: outcome += "✘" snake_case_ : Any = f'''\33[2K\r{outcome}''' if len(_UpperCamelCase ) > 0: snake_case_ : List[Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} snake_case_ : Any = round(mean_metrics[target_metric_key] , 2 ) snake_case_ : List[str] = f'''{outcome} {mean_target}''' if len(_UpperCamelCase ) > 1: results_str += f''' {tuple(round(_UpperCamelCase , 2 ) for x in results )}''' print(_UpperCamelCase ) snake_case_ : Optional[int] = variation return mean_metrics else: print(_UpperCamelCase ) return {variation_key: variation, target_metric_key: nan} def lowerCamelCase_ ( ) -> Optional[int]: """simple docstring""" snake_case_ : Any = torch.cuda.get_device_properties(torch.device('''cuda''' ) ) return f''' Datetime 
: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : str = pd.DataFrame(_UpperCamelCase ) snake_case_ : Optional[int] = '''variation''' snake_case_ : Union[str, Any] = '''diff_%''' snake_case_ : Optional[int] = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan snake_case_ : Optional[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(_UpperCamelCase ): # as a fallback, use the minimal value as the sentinel snake_case_ : Any = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(_UpperCamelCase ): snake_case_ : Dict = df.apply( lambda _UpperCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='''columns''' , ) # re-order columns snake_case_ : Dict = [variation_key, target_metric_key, diff_key, *report_metric_keys] snake_case_ : int = df.reindex(_UpperCamelCase , axis='''columns''' ) # reorder cols # capitalize snake_case_ : Optional[int] = df.rename(str.capitalize , axis='''columns''' ) # make the cols as narrow as possible snake_case_ : Any = df.rename(lambda _UpperCamelCase : c.replace('''_''' , '''<br>''' ) , axis='''columns''' ) snake_case_ : int = df.rename(lambda _UpperCamelCase : c.replace('''_''' , '''\n''' ) , axis='''columns''' ) snake_case_ : Tuple = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** 
Results:", df_github.to_markdown(index=_UpperCamelCase , floatfmt='''.2f''' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=_UpperCamelCase , floatfmt='''.2f''' )] print('''\n\n'''.join(_UpperCamelCase ) ) def lowerCamelCase_ ( ) -> Any: """simple docstring""" snake_case_ : Any = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Base cmd''' , ) parser.add_argument( '''--variations''' , default=_UpperCamelCase , type=_UpperCamelCase , nargs='''+''' , required=_UpperCamelCase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , ) parser.add_argument( '''--base-variation''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , ) parser.add_argument( '''--target-metric-key''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , ) parser.add_argument( '''--report-metric-keys''' , default='''''' , type=_UpperCamelCase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples''' , ) parser.add_argument( '''--repeat-times''' , default=1 , type=_UpperCamelCase , help='''How many times to re-run each variation - an average will be reported''' , ) parser.add_argument( '''--output_dir''' , default='''output_benchmark''' , type=_UpperCamelCase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , ) parser.add_argument( '''--verbose''' , default=_UpperCamelCase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , ) snake_case_ : Tuple = parser.parse_args() snake_case_ : Optional[Any] = args.output_dir Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) snake_case_ : Optional[int] = get_base_command(_UpperCamelCase , _UpperCamelCase ) # split each dimension into its --foo variations snake_case_ : Optional[int] = [list(map(str.strip , re.split(R'''\|''' , _UpperCamelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty snake_case_ : List[str] = list(map(str.strip , map(''' '''.join , itertools.product(*_UpperCamelCase ) ) ) ) snake_case_ : Optional[int] = max(len(_UpperCamelCase ) for x in variations ) # split wanted keys snake_case_ : int = args.report_metric_keys.split() # capture prints into a log file for convenience snake_case_ : str = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(f'''and this script\'s output is also piped into {report_fn}''' ) snake_case_ : Tuple = Tee(_UpperCamelCase ) print(f'''\n*** Running {len(_UpperCamelCase )} benchmarks:''' ) print(f'''Base command: {" ".join(_UpperCamelCase )}''' ) snake_case_ : 
List[Any] = '''variation''' snake_case_ : Tuple = [] for id, variation in enumerate(tqdm(_UpperCamelCase , desc='''Total completion: ''' , leave=_UpperCamelCase ) ): snake_case_ : Optional[Any] = base_cmd + variation.split() results.append( process_run( id + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , args.target_metric_key , _UpperCamelCase , args.repeat_times , _UpperCamelCase , args.verbose , ) ) process_results(_UpperCamelCase , args.target_metric_key , _UpperCamelCase , args.base_variation , _UpperCamelCase ) if __name__ == "__main__": main()
279
1
# NOTE(review): test suite for the BiT vision model — a BitModelTester fixture
# builder, a ModelTesterMixin-based test class (model / backbone / image
# classification checks, hidden-state shape checks, init checks), a slow
# integration test pinning logits on the COCO cats image, and a backbone test
# class. The source arrived identifier-obfuscated (`snake_case_` assignments
# whose original targets are referenced later) and line-collapsed, so the code
# is kept byte-identical here pending restoration from upstream — TODO confirm.
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__=3 , __magic_name__=32 , __magic_name__=3 , __magic_name__=10 , __magic_name__=[8, 16, 32, 64] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , __magic_name__=["stage2", "stage3", "stage4"] , __magic_name__=[2, 3, 4] , __magic_name__=1 , ) -> List[str]: '''simple docstring''' snake_case_ : Any = parent snake_case_ : Any = batch_size snake_case_ : Union[str, Any] = image_size snake_case_ : List[str] = num_channels snake_case_ : Tuple = embeddings_size snake_case_ : Dict = hidden_sizes snake_case_ : List[str] = depths snake_case_ : Dict = is_training snake_case_ : Tuple = use_labels snake_case_ : Optional[Any] = hidden_act snake_case_ : List[Any] = num_labels snake_case_ : int = scope snake_case_ : Any = len(__magic_name__ ) snake_case_ : List[Any] = out_features snake_case_ : Optional[Any] = out_indices snake_case_ : List[Any] = num_groups def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Optional[Any] 
= None if self.use_labels: snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase (self ) -> List[str]: '''simple docstring''' return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = BitModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = self.num_labels snake_case_ : str = BitForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : str = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple: '''simple docstring''' snake_case_ : Any = BitBackbone(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : Union[str, Any] = model(__magic_name__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , 
config.hidden_sizes[1:] ) # verify backbone works with out_features=None snake_case_ : Optional[int] = None snake_case_ : Optional[int] = BitBackbone(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : List[str] = model(__magic_name__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : int = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : Optional[Any] = config_and_inputs snake_case_ : Tuple = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _a, _a, unittest.TestCase ): lowerCamelCase_ : str = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () lowerCamelCase_ : Dict = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ : Optional[Any] = False lowerCamelCase_ : int = False lowerCamelCase_ : Optional[int] = False lowerCamelCase_ : str = False lowerCamelCase_ : List[Any] = False def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : List[Any] = BitModelTester(self ) snake_case_ : int = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' return @unittest.skip(reason='''Bit does not output attentions''' ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(__magic_name__ ) snake_case_ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Any = [*signature.parameters.keys()] snake_case_ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__magic_name__ ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Dict = model_class(config=__magic_name__ ) for name, module in model.named_modules(): if isinstance(__magic_name__ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not 
properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): snake_case_ : Tuple = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): snake_case_ : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) snake_case_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case_ : Dict = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case_ : Optional[Any] = layer_type snake_case_ : List[str] = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' pass def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def lowerCamelCase (self ) -> List[str]: '''simple docstring''' for model_name in 
BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[int] = BitModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ ) snake_case_ : List[Any] = self.default_image_processor snake_case_ : List[str] = prepare_img() snake_case_ : Any = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): snake_case_ : List[Any] = model(**__magic_name__ ) # verify the logits snake_case_ : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) snake_case_ : str = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) ) @require_torch class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Dict = (BitBackbone,) if is_torch_available() else () lowerCamelCase_ : Dict = BitConfig lowerCamelCase_ : Union[str, Any] = False def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Optional[int] = BitModelTester(self )
279
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    # Convert a pretrained UnCLIP text-to-image pipeline into an image-variation
    # pipeline by reusing its decoder/super-res components and adding a CLIP
    # vision encoder for image conditioning.
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # argparse exposes "--txt2img_unclip" as args.txt2img_unclip (dashes -> underscores).
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
279
1
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """
    Implementation of the simulated annealing optimization algorithm.

    Starting from ``search_prob``, repeatedly pick a random neighbor; accept it
    if it improves the score, otherwise accept it with probability
    e**(change / current_temp).  The temperature decays geometrically by
    ``rate_of_decrease`` each outer iteration; the search ends when the
    temperature drops below ``threshold_temp`` or no acceptable neighbor exists.

    :param search_prob: the state to start the search from (must expose
        ``score()``, ``get_neighbors()`` and ``x``/``y`` coordinates)
    :param find_max: maximize the score if True, minimize it if False
    :param max_x, min_x, max_y, min_y: bounds on the search domain
    :param visualization: if True, plot score per iteration with matplotlib
    :return: the best state visited during the search
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # probability generation function: worse moves are accepted
                # with probability e**(change / T), which shrinks as T cools
                probability = (math.e) ** (change / current_temp)
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        """Convex bowl: f(x, y) = x^2 + y^2."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'''and 50 > y > - 5 found via hill climbing: {local_max.score()}'''
    )

    def test_f2(x, y):
        """Saddle-ish test function: f(x, y) = 3*x^2 - 6*y."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F'''{local_min.score()}'''
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F'''{local_max.score()}'''
    )
279
from math import factorial

# Precomputed factorial of each decimal digit, keyed by the digit's character.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """
    Return the sum of the factorials of the digits of ``number``.

    :raises TypeError: if ``number`` is not an int
    :raises ValueError: if ``number`` is negative

    >>> digit_factorial_sum(145)
    145
    """
    if not isinstance(number, int):
        raise TypeError('''Parameter number must be int''')

    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''')

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """
    Project Euler 74: count how many starting numbers below ``number_limit``
    produce a digit-factorial-sum chain with exactly ``chain_length``
    non-repeating terms.

    :raises TypeError: if either parameter is not an int
    :raises ValueError: if either parameter is not positive

    >>> solution(1, 10)
    2
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('''Parameters chain_length and number_limit must be int''')

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0'''
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating
        # item or the length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Reuse the cached tail length when the chain ran into a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'''{solution()}''')
279
1
def solution(n: int = 10) -> str:
    """
    Project Euler 97: return the last ``n`` digits of the non-Mersenne prime
    28433 * 2**7830457 + 1.

    Uses three-argument ``pow`` for fast modular exponentiation; the original
    mangled version passed ``n`` itself as the pow() modulus instead of
    ``10**n``, which is fixed here.

    :raises ValueError: if ``n`` is not a non-negative int

    >>> solution(1)
    '7'
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError('''Invalid input''')

    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1

    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(F'''{solution(1_0) = }''')
279
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


# NOTE(review): throughout this chunk, values are assigned to throwaway names
# (`snake_case_`, `lowerCamelCase_`) but read back under other names
# (`self.file`, `self.compressed_name`, `f`, ...).  The identifiers appear to
# have been mangled by an automated rewrite; the intended attribute names can
# be inferred from their use sites but are NOT restored here — confirm against
# the upstream `datasets.filesystems.compression` module before relying on it.
class __lowerCAmelCase ( _a ):
    # Base read-only filesystem exposing a single compressed file as one
    # uncompressed file at the root.
    lowerCamelCase_ : int = ''''''  # root marker (empty: single file at root)
    lowerCamelCase_ : str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    lowerCamelCase_ : str = None  # compression type in fsspec. ex: "gzip"
    lowerCamelCase_ : str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self , __magic_name__ = "" , __magic_name__ = None , __magic_name__ = None , **__magic_name__ ) -> Any:
        '''simple docstring'''
        super().__init__(self , **__magic_name__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        # NOTE(review): result presumably meant to be stored as `self.file` — see reads below.
        snake_case_ : Union[str, Any] = fsspec.open(
            __magic_name__ ,
            mode='''rb''' ,
            protocol=__magic_name__ ,
            compression=self.compression ,
            client_kwargs={
                '''requote_redirect_url''': False,  # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''' , {} ),  # To avoid issues if it was already passed.
            } ,
            **(target_options or {}) ,
        )
        # Basename of the innermost path in a chained "::" URL (the compressed file's name).
        snake_case_ : Tuple = os.path.basename(self.file.path.split('''::''' )[0] )
        # Uncompressed name: compressed name with its final extension stripped, if any.
        snake_case_ : Optional[Any] = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        snake_case_ : Dict = None  # lazy directory-listing cache

    @classmethod
    def lowerCamelCase (cls , __magic_name__ ) -> Optional[int]:
        '''simple docstring'''
        # Strip the protocol prefix and any leading "/" so the single file
        # appears at the archive root.
        return super()._strip_protocol(__magic_name__ ).lstrip('''/''' )

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''simple docstring'''
        # Populate the dir cache with the one (uncompressed) entry on first use.
        if self.dir_cache is None:
            snake_case_ : Optional[int] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            snake_case_ : List[str] = {f['''name''']: f}

    def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
        '''simple docstring'''
        # Read the whole decompressed content.
        return self.file.open().read()

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = "rb" , __magic_name__=None , __magic_name__=True , __magic_name__=None , **__magic_name__ , ) -> int:
        '''simple docstring'''
        # Only binary reads are supported; fsspec wraps with TextIOWrapper for "r".
        snake_case_ : Union[str, Any] = self._strip_protocol(__magic_name__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()


# Concrete per-compression subclasses: (protocol, fsspec compression, extension).
class __lowerCAmelCase ( _a ):
    lowerCamelCase_ : Union[str, Any] = '''bz2'''
    lowerCamelCase_ : Any = '''bz2'''
    lowerCamelCase_ : int = '''.bz2'''


class __lowerCAmelCase ( _a ):
    lowerCamelCase_ : Union[str, Any] = '''gzip'''
    lowerCamelCase_ : Dict = '''gzip'''
    lowerCamelCase_ : int = '''.gz'''


class __lowerCAmelCase ( _a ):
    lowerCamelCase_ : Any = '''lz4'''
    lowerCamelCase_ : Any = '''lz4'''
    lowerCamelCase_ : Optional[Any] = '''.lz4'''


class __lowerCAmelCase ( _a ):
    lowerCamelCase_ : Tuple = '''xz'''
    lowerCamelCase_ : Any = '''xz'''
    lowerCamelCase_ : int = '''.xz'''


class __lowerCAmelCase ( _a ):
    lowerCamelCase_ : Union[str, Any] = '''zstd'''
    lowerCamelCase_ : Tuple = '''zstd'''
    lowerCamelCase_ : Any = '''.zst'''

    def __init__(self , __magic_name__ , __magic_name__ = "rb" , __magic_name__ = None , __magic_name__ = None , __magic_name__ = DEFAULT_BLOCK_SIZE , **__magic_name__ , ) -> Tuple:
        '''simple docstring'''
        super().__init__(
            fo=__magic_name__ , mode=__magic_name__ , target_protocol=__magic_name__ , target_options=__magic_name__ , block_size=__magic_name__ , **__magic_name__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case_ : Dict = self.file.__enter__  # original __enter__, captured before patching

        class __lowerCAmelCase :
            # Proxy that forwards everything to the wrapped file object but owns
            # its own (writable) close/attribute surface.
            def __init__(self , __magic_name__ ) -> List[Any]:
                '''simple docstring'''
                snake_case_ : str = file_

            def __enter__(self ) -> List[Any]:
                '''simple docstring'''
                self._file.__enter__()
                return self

            def __exit__(self , *__magic_name__ , **__magic_name__ ) -> int:
                '''simple docstring'''
                self._file.__exit__(*__magic_name__ , **__magic_name__ )

            def __iter__(self ) -> Optional[int]:
                '''simple docstring'''
                return iter(self._file )

            def lowerCamelCase (self ) -> Union[str, Any]:
                '''simple docstring'''
                return next(self._file )

            def __getattr__(self , __magic_name__ ) -> str:
                '''simple docstring'''
                return getattr(self._file , __magic_name__ )

        def fixed_enter(*__magic_name__ , **__magic_name__ ):
            # Replace __enter__ so the context manager yields the wrapper.
            return WrappedFile(_enter(*__magic_name__ , **__magic_name__ ) )

        snake_case_ : Tuple = fixed_enter
279
1
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


# NOTE(review): as elsewhere in this file, locals are assigned to `snake_case_`
# but read under their original names (results, benchmark, config, ...), and
# signatures repeat `__magic_name__`.  Comments describe the visible intent.
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    # Test-suite for the PyTorch benchmarking utilities: each test builds
    # PyTorchBenchmarkArguments for a tiny hub model, runs the benchmark, and
    # asserts the result dicts are populated.
    def lowerCamelCase (self , __magic_name__ ) -> List[Any]:
        '''simple docstring'''
        # Helper: every (batch_size, sequence_length) cell of every model's
        # result table must be non-None.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                snake_case_ : Dict = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(__magic_name__ )

    def lowerCamelCase (self ) -> str:
        '''simple docstring'''
        # Inference benchmark with no explicit configs.
        snake_case_ : str = '''sshleifer/tiny-gpt2'''
        snake_case_ : List[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : Any = PyTorchBenchmark(__magic_name__ )
        snake_case_ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowerCamelCase (self ) -> Dict:
        '''simple docstring'''
        # Same, but benchmarking only the bare pretrained model (no head).
        snake_case_ : Tuple = '''sgugger/tiny-distilbert-classification'''
        snake_case_ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
        snake_case_ : List[str] = PyTorchBenchmark(__magic_name__ )
        snake_case_ : Union[str, Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowerCamelCase (self ) -> Tuple:
        '''simple docstring'''
        # Inference benchmark through torchscript.
        snake_case_ : int = '''sshleifer/tiny-gpt2'''
        snake_case_ : Union[str, Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , torchscript=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : int = PyTorchBenchmark(__magic_name__ )
        snake_case_ : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def lowerCamelCase (self ) -> Dict:
        '''simple docstring'''
        # Inference benchmark in fp16 (GPU only).
        snake_case_ : Optional[int] = '''sshleifer/tiny-gpt2'''
        snake_case_ : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , fpaa=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : int = PyTorchBenchmark(__magic_name__ )
        snake_case_ : Optional[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowerCamelCase (self ) -> List[Any]:
        '''simple docstring'''
        # Benchmark a config whose `architectures` is explicitly None.
        snake_case_ : str = '''sshleifer/tiny-gpt2'''
        snake_case_ : Any = AutoConfig.from_pretrained(__magic_name__ )
        # set architectures equal to `None`
        snake_case_ : Optional[Any] = None
        snake_case_ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : List[str] = PyTorchBenchmark(__magic_name__ , configs=[config] )
        snake_case_ : Union[str, Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowerCamelCase (self ) -> int:
        '''simple docstring'''
        # Training benchmark with no explicit configs.
        snake_case_ : List[str] = '''sshleifer/tiny-gpt2'''
        snake_case_ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : List[Any] = PyTorchBenchmark(__magic_name__ )
        snake_case_ : int = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
    def lowerCamelCase (self ) -> int:
        '''simple docstring'''
        # Training benchmark in fp16 (GPU only).
        snake_case_ : Union[str, Any] = '''sshleifer/tiny-gpt2'''
        snake_case_ : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__magic_name__ , multi_process=__magic_name__ , )
        snake_case_ : List[Any] = PyTorchBenchmark(__magic_name__ )
        snake_case_ : Optional[int] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def lowerCamelCase (self ) -> Dict:
        '''simple docstring'''
        # Inference benchmark with an explicit AutoConfig.
        snake_case_ : Union[str, Any] = '''sshleifer/tiny-gpt2'''
        snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
        snake_case_ : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : Optional[int] = PyTorchBenchmark(__magic_name__ , configs=[config] )
        snake_case_ : Optional[int] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowerCamelCase (self ) -> Any:
        '''simple docstring'''
        # Inference benchmark for an encoder-decoder model with explicit config.
        snake_case_ : Union[str, Any] = '''sshleifer/tinier_bart'''
        snake_case_ : Any = AutoConfig.from_pretrained(__magic_name__ )
        snake_case_ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : Tuple = PyTorchBenchmark(__magic_name__ , configs=[config] )
        snake_case_ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowerCamelCase (self ) -> Tuple:
        '''simple docstring'''
        # Training benchmark with an explicit AutoConfig.
        snake_case_ : str = '''sshleifer/tiny-gpt2'''
        snake_case_ : Dict = AutoConfig.from_pretrained(__magic_name__ )
        snake_case_ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : Union[str, Any] = PyTorchBenchmark(__magic_name__ , configs=[config] )
        snake_case_ : Optional[int] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def lowerCamelCase (self ) -> Tuple:
        '''simple docstring'''
        # Training benchmark for an encoder-decoder model with explicit config.
        snake_case_ : Optional[int] = '''sshleifer/tinier_bart'''
        snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
        snake_case_ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        snake_case_ : List[Any] = PyTorchBenchmark(__magic_name__ , configs=[config] )
        snake_case_ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def lowerCamelCase (self ) -> List[str]:
        '''simple docstring'''
        # CSV export: every requested CSV file must be written to the temp dir.
        snake_case_ : int = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case_ : Optional[int] = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(__magic_name__ , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(__magic_name__ , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(__magic_name__ , '''train_time.csv''' ) , env_info_csv_file=os.path.join(__magic_name__ , '''env.csv''' ) , multi_process=__magic_name__ , )
            snake_case_ : List[str] = PyTorchBenchmark(__magic_name__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(__magic_name__ , '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__magic_name__ , '''train_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__magic_name__ , '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__magic_name__ , '''train_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__magic_name__ , '''env.csv''' ) ).exists() )

    def lowerCamelCase (self ) -> List[str]:
        '''simple docstring'''
        # Line-by-line memory tracing: summaries must be populated and a log written.
        snake_case_ : str = '''sshleifer/tiny-gpt2'''

        def _check_summary_is_not_empty(__magic_name__ ):
            self.assertTrue(hasattr(__magic_name__ , '''sequential''' ) )
            self.assertTrue(hasattr(__magic_name__ , '''cumulative''' ) )
            self.assertTrue(hasattr(__magic_name__ , '''current''' ) )
            self.assertTrue(hasattr(__magic_name__ , '''total''' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case_ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , '''log.txt''' ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , multi_process=__magic_name__ , )
            snake_case_ : Union[str, Any] = PyTorchBenchmark(__magic_name__ )
            snake_case_ : Union[str, Any] = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(__magic_name__ , '''log.txt''' ) ).exists() )
279
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class __lowerCAmelCase ( _a ):
    """
    Configuration class for a Megatron-BERT model.

    Stores the hyperparameters that define the model architecture.  The
    original signature declared every parameter as a duplicate
    ``__magic_name__`` (a SyntaxError) while the body read the real names;
    this restores the intended parameter names and stores each value on the
    instance instead of discarding it in a local.
    """

    # Model type identifier used by the auto classes.
    lowerCamelCase_ : Any = '''megatron-bert'''

    def __init__(
        self,
        vocab_size=2_9056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ) -> Any:
        """
        :param vocab_size: size of the token vocabulary
        :param hidden_size: dimensionality of the encoder layers
        :param num_hidden_layers: number of transformer encoder layers
        :param num_attention_heads: attention heads per layer
        :param intermediate_size: feed-forward inner dimension
        :param hidden_act: activation function name
        :param hidden_dropout_prob: dropout on hidden states
        :param attention_probs_dropout_prob: dropout on attention weights
        :param max_position_embeddings: maximum sequence length supported
        :param type_vocab_size: number of token type (segment) ids
        :param initializer_range: stddev for weight initialization
        :param layer_norm_eps: epsilon used by LayerNorm
        :param pad_token_id: id of the padding token (forwarded to the base config)
        :param position_embedding_type: "absolute" or a relative variant
        :param use_cache: whether the model should return past key/values
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
279
1
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase_ = {
    '''vocab_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
    },
}

lowerCAmelCase_ = {
    '''facebook/bart-base''': 1_0_2_4,
    '''facebook/bart-large''': 1_0_2_4,
    '''facebook/bart-large-mnli''': 1_0_2_4,
    '''facebook/bart-large-cnn''': 1_0_2_4,
    '''facebook/bart-large-xsum''': 1_0_2_4,
    '''yjernite/bart_eli5''': 1_0_2_4,
}


# NOTE(review): identifiers in this chunk have been mangled by an automated
# rewrite (locals assigned to `snake_case_` but read under their original
# names; duplicate `__magic_name__` parameters; `@mask_token.setter` with no
# matching `mask_token` property).  Comments below record the visible intent.
class __lowerCAmelCase ( _a ):
    # Fast (tokenizers-backed) BART tokenizer.
    lowerCamelCase_ : List[Any] = VOCAB_FILES_NAMES
    lowerCamelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase_ : List[Any] = ['''input_ids''', '''attention_mask''']
    lowerCamelCase_ : int = BartTokenizer  # slow-tokenizer counterpart

    def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__="replace" , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="</s>" , __magic_name__="<s>" , __magic_name__="<unk>" , __magic_name__="<pad>" , __magic_name__="<mask>" , __magic_name__=False , __magic_name__=True , **__magic_name__ , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(
            __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , )
        # Rebuild the backend pre-tokenizer if the requested add_prefix_space
        # differs from the serialized one.
        snake_case_ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , __magic_name__ ) != add_prefix_space:
            snake_case_ : int = getattr(__magic_name__ , pre_tok_state.pop('''type''' ) )
            snake_case_ : Any = add_prefix_space
            snake_case_ : Any = pre_tok_class(**__magic_name__ )
        snake_case_ : Dict = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        snake_case_ : Optional[Any] = '''post_processor'''
        snake_case_ : Tuple = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
        if tokenizer_component_instance:
            snake_case_ : Any = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case_ : int = tuple(state['''sep'''] )
            if "cls" in state:
                snake_case_ : int = tuple(state['''cls'''] )

            snake_case_ : List[str] = False

            if state.get('''add_prefix_space''' , __magic_name__ ) != add_prefix_space:
                snake_case_ : List[str] = add_prefix_space
                snake_case_ : int = True

            if state.get('''trim_offsets''' , __magic_name__ ) != trim_offsets:
                snake_case_ : str = trim_offsets
                snake_case_ : Optional[Any] = True

            if changes_to_apply:
                snake_case_ : Tuple = getattr(__magic_name__ , state.pop('''type''' ) )
                snake_case_ : Any = component_class(**__magic_name__ )
                setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )

    @property
    def lowerCamelCase (self ) -> str:
        '''simple docstring'''
        # Mask token accessor; logs an error when unset instead of raising.
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def lowerCamelCase (self , __magic_name__ ) -> List[Any]:
        '''simple docstring'''
        # Mask token behaves like a normal word (includes the preceding space):
        # wrap plain strings in a lstrip-ing AddedToken.
        snake_case_ : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value
        snake_case_ : List[str] = value

    def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> BatchEncoding:
        '''simple docstring'''
        # Pre-tokenized input requires add_prefix_space=True.
        snake_case_ : Optional[int] = kwargs.get('''is_split_into_words''' , __magic_name__ )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )

        return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> BatchEncoding:
        '''simple docstring'''
        # Same guard as _batch_encode_plus, for single-example encoding.
        snake_case_ : Optional[int] = kwargs.get('''is_split_into_words''' , __magic_name__ )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )

        return super()._encode_plus(*__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
        '''simple docstring'''
        # Save the backend vocabulary files; returns the written file names.
        snake_case_ : Dict = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
        return tuple(__magic_name__ )

    def lowerCamelCase (self , __magic_name__ , __magic_name__=None ) -> Dict:
        '''simple docstring'''
        # Build inputs: <s> A </s> (</s> B </s> for pairs).
        snake_case_ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
        '''simple docstring'''
        # BART does not use token type ids: return an all-zero mask.
        snake_case_ : List[Any] = [self.sep_token_id]
        snake_case_ : List[Any] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
279
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch lowerCAmelCase_ = random.Random() def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]: """simple docstring""" if rng is None: snake_case_ : str = global_rng snake_case_ : Any = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __lowerCAmelCase ( unittest.TestCase ): def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=400 , __magic_name__=2000 , __magic_name__=10 , __magic_name__=160 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=4000 , __magic_name__=False , __magic_name__=True , ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = parent snake_case_ : str = batch_size snake_case_ : Union[str, Any] = min_seq_length snake_case_ : Tuple = max_seq_length snake_case_ : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case_ : Optional[int] = padding_value snake_case_ : Union[str, Any] = sampling_rate snake_case_ : Optional[int] = return_attention_mask snake_case_ : str = do_normalize snake_case_ : str = feature_size snake_case_ : Optional[Any] = chunk_length snake_case_ : Union[str, Any] = hop_length def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": 
self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCamelCase (self , __magic_name__=False , __magic_name__=False ) -> Optional[Any]: '''simple docstring''' def _flatten(__magic_name__ ): return list(itertools.chain(*__magic_name__ ) ) if equal_length: snake_case_ : int = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size snake_case_ : int = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: snake_case_ : str = [np.asarray(__magic_name__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[str] = WhisperFeatureExtractionTester(self ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Union[str, Any] = feat_extract_first.save_pretrained(__magic_name__ )[0] check_json_file_has_correct_format(__magic_name__ ) snake_case_ : List[Any] = self.feature_extraction_class.from_pretrained(__magic_name__ ) snake_case_ : Optional[int] = feat_extract_first.to_dict() snake_case_ : Dict = feat_extract_second.to_dict() snake_case_ : List[str] = feat_extract_first.mel_filters snake_case_ : Union[str, Any] = feat_extract_second.mel_filters self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) ) self.assertEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = 
self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = os.path.join(__magic_name__ , '''feat_extract.json''' ) feat_extract_first.to_json_file(__magic_name__ ) snake_case_ : Optional[int] = self.feature_extraction_class.from_json_file(__magic_name__ ) snake_case_ : int = feat_extract_first.to_dict() snake_case_ : Optional[int] = feat_extract_second.to_dict() snake_case_ : Union[str, Any] = feat_extract_first.mel_filters snake_case_ : str = feat_extract_second.mel_filters self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) ) self.assertEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] snake_case_ : str = [np.asarray(__magic_name__ ) for speech_input in speech_inputs] # Test feature size snake_case_ : str = feature_extractor(__magic_name__ , padding='''max_length''' , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input snake_case_ : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features snake_case_ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) ) # Test batched snake_case_ : int = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features snake_case_ : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): 
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. snake_case_ : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] snake_case_ : List[str] = np.asarray(__magic_name__ ) snake_case_ : List[Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features snake_case_ : Dict = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) ) # Test truncation required snake_case_ : Any = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] snake_case_ : Union[str, Any] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs] snake_case_ : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs] snake_case_ : Optional[Any] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs_truncated] snake_case_ : Any = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features snake_case_ : List[Any] = feature_extractor(__magic_name__ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) ) def lowerCamelCase (self ) -> int: '''simple docstring''' import torch snake_case_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa ) snake_case_ : Dict = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case_ : Optional[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) snake_case_ : Optional[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) 
self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Optional[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech snake_case_ : Optional[Any] = ds.sort('''id''' ).select(range(__magic_name__ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : str = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on snake_case_ : List[Any] = self._load_datasamples(1 ) snake_case_ : Union[str, Any] = WhisperFeatureExtractor() snake_case_ : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __magic_name__ , atol=1e-4 ) ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ : Optional[int] = self._load_datasamples(1 )[0] snake_case_ : List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue snake_case_ : Optional[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__magic_name__ )[0] self.assertTrue(np.all(np.mean(__magic_name__ ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__magic_name__ ) - 1 ) < 1e-3 ) )
279
1
def lowerCamelCase_ ( _UpperCamelCase ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) snake_case_ : int = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 snake_case_ : List[Any] = 1 if upper_limit > 0: snake_case_ : Union[str, Any] = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(_UpperCamelCase ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''') print('''\n*** Enter -1 at any time to quit ***''') print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''') try: while True: lowerCAmelCase_ = int(input().strip()) if N < 0: print('''\n********* Goodbye!! ************''') break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print('''Try another upper limit for the sequence: ''', end='''''') except (NameError, ValueError): print('''\n********* Invalid input, goodbye! ************\n''') import doctest doctest.testmod()
279
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase_ = logging.getLogger(__name__) def lowerCamelCase_ ( ) -> Optional[Any]: """simple docstring""" snake_case_ : List[str] = argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=_UpperCamelCase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=_UpperCamelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=_UpperCamelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=_UpperCamelCase , default=1_000 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=_UpperCamelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=_UpperCamelCase , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=_UpperCamelCase , help='''Output directory where the TFRecord shards will be saved. 
If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) snake_case_ : List[Any] = parser.parse_args() return args def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple: """simple docstring""" def fn(_UpperCamelCase ): return tokenizer(examples['''text'''] ) return fn def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case_ : Any = [] for i in range(len(tokenized_data['''input_ids'''] ) ): snake_case_ : Any = { '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), '''attention_mask''': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } snake_case_ : Optional[int] = tf.train.Features(feature=_UpperCamelCase ) snake_case_ : Optional[Any] = tf.train.Example(features=_UpperCamelCase ) snake_case_ : Optional[Any] = example.SerializeToString() records.append(_UpperCamelCase ) return records def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : int = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: snake_case_ : Union[str, Any] = min(len(_UpperCamelCase ) , args.limit ) snake_case_ : int = dataset.select(range(_UpperCamelCase ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) snake_case_ : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) snake_case_ : str = os.path.join(args.output_dir , args.split ) if not os.path.exists(_UpperCamelCase ): os.makedirs(_UpperCamelCase ) else: snake_case_ : Optional[Any] = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. snake_case_ : Optional[Any] = tokenize_function(_UpperCamelCase ) snake_case_ : List[Any] = dataset.map(_UpperCamelCase , batched=_UpperCamelCase , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(_UpperCamelCase ): # Concatenate all texts. snake_case_ : Tuple = {k: sum(examples[k] , [] ) for k in examples.keys()} snake_case_ : List[str] = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 snake_case_ : int = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. 
snake_case_ : Union[str, Any] = { k: [t[i : i + args.max_length] for i in range(0 , _UpperCamelCase , args.max_length )] for k, t in concatenated_examples.items() } return result snake_case_ : int = dataset_tokenized.map(_UpperCamelCase , batched=_UpperCamelCase , batch_size=1_000 , num_proc=4 ) snake_case_ : str = 0 snake_case_ : Optional[Any] = 0 for shard in range(0 , len(_UpperCamelCase ) , args.shard_size ): snake_case_ : Any = grouped_dataset[shard : shard + args.shard_size] snake_case_ : str = len(dataset_snapshot['''input_ids'''] ) snake_case_ : Union[str, Any] = os.path.join(_UpperCamelCase , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) snake_case_ : Dict = get_serialized_examples(_UpperCamelCase ) with tf.io.TFRecordWriter(_UpperCamelCase ) as out_file: for i in range(len(_UpperCamelCase ) ): snake_case_ : List[str] = serialized_examples[i] out_file.write(_UpperCamelCase ) print('''Wrote file {} containing {} records'''.format(_UpperCamelCase , _UpperCamelCase ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f: print(f'''Total {args.split} records: {total_records}''' , file=_UpperCamelCase ) if __name__ == "__main__": lowerCAmelCase_ = parse_args() main(args)
279
1
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : List[str] = [False] * len(_UpperCamelCase ) snake_case_ : Optional[int] = [] queue.append(_UpperCamelCase ) snake_case_ : List[Any] = True while queue: snake_case_ : int = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCamelCase ) snake_case_ : str = True snake_case_ : Optional[int] = u return visited[t] def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : Optional[int] = [-1] * (len(_UpperCamelCase )) snake_case_ : Dict = 0 while bfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): snake_case_ : Optional[Any] = float('''Inf''' ) snake_case_ : str = sink while s != source: # Find the minimum value in select path snake_case_ : Optional[Any] = min(_UpperCamelCase , graph[parent[s]][s] ) snake_case_ : Dict = parent[s] max_flow += path_flow snake_case_ : List[str] = sink while v != source: snake_case_ : int = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow snake_case_ : Dict = parent[v] return max_flow lowerCAmelCase_ = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] lowerCAmelCase_ , lowerCAmelCase_ = 0, 5 print(ford_fulkerson(graph, source, sink))
279
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Any = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) snake_case_ : List[Any] = VideoClassificationPipeline(model=__magic_name__ , image_processor=__magic_name__ , top_k=2 ) snake_case_ : str = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Any: '''simple docstring''' for example in examples: snake_case_ : Union[str, Any] = video_classifier(__magic_name__ ) self.assertEqual( __magic_name__ , [ {'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )}, {'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )}, ] , ) @require_torch def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Any = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' snake_case_ : str = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) snake_case_ : int = pipeline( '''video-classification''' , model=__magic_name__ , feature_extractor=__magic_name__ , frame_sampling_rate=4 ) snake_case_ : List[str] = hf_hub_download(repo_id='''nateraw/video-demo''' , 
filename='''archery.mp4''' , repo_type='''dataset''' ) snake_case_ : Union[str, Any] = video_classifier(__magic_name__ , top_k=2 ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , ) snake_case_ : int = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' pass
279
1
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } lowerCAmelCase_ = { '''b0''': { '''hidden_dim''': 1_2_8_0, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 2_2_4, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1_2_8_0, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 2_4_0, '''dropout_rate''': 0.2, '''dw_padding''': [1_6], }, '''b2''': { '''hidden_dim''': 1_4_0_8, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 2_6_0, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 1_6], }, '''b3''': { '''hidden_dim''': 1_5_3_6, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 3_0_0, '''dropout_rate''': 0.3, '''dw_padding''': [5, 1_8], }, '''b4''': { '''hidden_dim''': 1_7_9_2, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 3_8_0, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2_0_4_8, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 4_5_6, '''dropout_rate''': 0.4, '''dw_padding''': [1_3, 2_7], }, '''b6''': { '''hidden_dim''': 2_3_0_4, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 5_2_8, '''dropout_rate''': 0.5, '''dw_padding''': 
[3_1], }, '''b7''': { '''hidden_dim''': 2_5_6_0, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 6_0_0, '''dropout_rate''': 0.5, '''dw_padding''': [1_8], }, } def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" snake_case_ : List[str] = EfficientNetConfig() snake_case_ : Optional[int] = CONFIG_MAP[model_name]['''hidden_dim'''] snake_case_ : Dict = CONFIG_MAP[model_name]['''width_coef'''] snake_case_ : Optional[Any] = CONFIG_MAP[model_name]['''depth_coef'''] snake_case_ : Dict = CONFIG_MAP[model_name]['''image_size'''] snake_case_ : Union[str, Any] = CONFIG_MAP[model_name]['''dropout_rate'''] snake_case_ : Union[str, Any] = CONFIG_MAP[model_name]['''dw_padding'''] snake_case_ : Tuple = '''huggingface/label-files''' snake_case_ : str = '''imagenet-1k-id2label.json''' snake_case_ : Optional[int] = 1_000 snake_case_ : Any = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) snake_case_ : str = {int(_UpperCamelCase ): v for k, v in idalabel.items()} snake_case_ : str = idalabel snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase_ ( ) -> Union[str, Any]: """simple docstring""" snake_case_ : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ : int = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Any = CONFIG_MAP[model_name]['''image_size'''] snake_case_ : int = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : str = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] 
snake_case_ : str = sorted(set(_UpperCamelCase ) ) snake_case_ : Optional[int] = len(_UpperCamelCase ) snake_case_ : Union[str, Any] = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} snake_case_ : str = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: snake_case_ : Union[str, Any] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) 
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) snake_case_ : List[Any] = {} for item in rename_keys: if item[0] in original_param_names: snake_case_ : Any = '''efficientnet.''' + item[1] snake_case_ : str = '''classifier.weight''' snake_case_ : Any = '''classifier.bias''' return key_mapping def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue snake_case_ : Optional[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: 
snake_case_ : Any = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: snake_case_ : List[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: snake_case_ : Tuple = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: snake_case_ : str = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]: """simple docstring""" snake_case_ : Dict = model_classes[model_name]( include_top=_UpperCamelCase , weights='''imagenet''' , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_000 , classifier_activation='''softmax''' , ) snake_case_ : List[str] = original_model.trainable_variables snake_case_ : Optional[int] = original_model.non_trainable_variables snake_case_ : Dict = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: snake_case_ : Dict = param.numpy() snake_case_ : str = list(tf_params.keys() ) # Load HuggingFace model snake_case_ : Tuple = get_efficientnet_config(_UpperCamelCase ) snake_case_ : Tuple = EfficientNetForImageClassification(_UpperCamelCase ).eval() snake_case_ : Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) snake_case_ : Dict = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image snake_case_ : Any = convert_image_processor(_UpperCamelCase ) snake_case_ : Any = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): snake_case_ : Any = hf_model(**_UpperCamelCase ) snake_case_ : int = outputs.logits.detach().numpy() # 
Original model inference snake_case_ : List[str] = False snake_case_ : Optional[Any] = CONFIG_MAP[model_name]['''image_size'''] snake_case_ : Tuple = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) snake_case_ : Tuple = image.img_to_array(_UpperCamelCase ) snake_case_ : Optional[Any] = np.expand_dims(_UpperCamelCase , axis=0 ) snake_case_ : Dict = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) snake_case_ : Optional[Any] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') lowerCAmelCase_ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
279
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of ``number``.

    Args:
        number: The base of the table.
        number_of_terms: How many rows to produce (``i`` runs ``1..number_of_terms``).

    Returns:
        The table as newline-joined lines of the form ``"n * i = n*i"``.

    >>> print(multiplication_table(number=5, number_of_terms=2))
    5 * 1 = 5
    5 * 2 = 10
    """
    # BUG FIX: the mangled definition took two parameters both named
    # `_UpperCamelCase` (a SyntaxError) and read the undefined names `number`
    # and `number_of_terms`; restore the names the call below already uses.
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
279
1
# Build an UnCLIP image-variation pipeline by reusing the sub-modules of a
# pretrained text-to-image UnCLIP pipeline and swapping its text prior for a
# CLIP image encoder, then save the assembled pipeline to --dump_path.
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # BUG FIX: the mangled version read `args.txtaimg_unclip`, but the flag is
    # declared as `--txt2img_unclip`, so argparse exposes `args.txt2img_unclip`.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Every component except the prior comes straight from the txt2img pipeline.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
279
# Convert an original OpenAI Whisper checkpoint into a HuggingFace
# `WhisperForConditionalGeneration` checkpoint.
#
# NOTE(review): local names in this file were mechanically replaced with
# placeholders (`snake_case_`, `_UpperCamelCase`, `lowerCAmelCase_`) while
# later references still use the real names (`state_dict`, `dimensions`,
# `parser`, `args`, ...), and `hashlib.shaaaa` is presumably `hashlib.sha256`
# before mangling, so the module cannot run as-is.  Comments below document
# intent; the code tokens are left untouched.
import argparse
import hashlib
import os
import urllib
import warnings

import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration

# Download URLs for the original OpenAI checkpoints, keyed by model size.
# The path component before the filename is the file's SHA-256 checksum.
lowerCAmelCase_ = {
    '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
    '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
    '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
    '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
    '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
    '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
    '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
    '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
    '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
    '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}


def lowerCamelCase_ ( _UpperCamelCase ) -> None:
    """Drop top-level keys that are never converted (in place)."""
    snake_case_ : List[str] = ['''layers''', '''blocks''']
    for k in ignore_keys:
        # Second positional arg is `pop`'s default, so missing keys are ignored.
        state_dict.pop(_UpperCamelCase , _UpperCamelCase )


# Substring-level renames from OpenAI state-dict keys to HF module names.
lowerCAmelCase_ = {
    '''blocks''': '''layers''',
    '''mlp.0''': '''fc1''',
    '''mlp.2''': '''fc2''',
    '''mlp_ln''': '''final_layer_norm''',
    '''.attn.query''': '''.self_attn.q_proj''',
    '''.attn.key''': '''.self_attn.k_proj''',
    '''.attn.value''': '''.self_attn.v_proj''',
    '''.attn_ln''': '''.self_attn_layer_norm''',
    '''.attn.out''': '''.self_attn.out_proj''',
    '''.cross_attn.query''': '''.encoder_attn.q_proj''',
    '''.cross_attn.key''': '''.encoder_attn.k_proj''',
    '''.cross_attn.value''': '''.encoder_attn.v_proj''',
    '''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
    '''.cross_attn.out''': '''.encoder_attn.out_proj''',
    '''decoder.ln.''': '''decoder.layer_norm.''',
    '''encoder.ln.''': '''encoder.layer_norm.''',
    '''token_embedding''': '''embed_tokens''',
    '''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
    '''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
    '''ln_post''': '''layer_norm''',
}


def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Rename every key of the state dict per WHISPER_MAPPING (in place) and return it."""
    snake_case_ : str = list(s_dict.keys() )
    for key in keys:
        snake_case_ : Optional[int] = key
        # Apply every matching substring replacement to build the new key.
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                snake_case_ : List[str] = new_key.replace(_UpperCamelCase , _UpperCamelCase )
        print(f'''{key} -> {new_key}''' )
        snake_case_ : Tuple = s_dict.pop(_UpperCamelCase )
    return s_dict


def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Turn an embedding layer into a (bias-free) Linear sharing its weights.

    Used to tie the LM head (`proj_out`) to the decoder token embeddings.
    """
    snake_case_ , snake_case_ : Dict = emb.weight.shape
    snake_case_ : Tuple = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
    snake_case_ : Any = emb.weight.data
    return lin_layer


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> bytes:
    """Download `url` into `root` with a tqdm progress bar, verify its SHA-256
    (embedded in the URL path) and return the raw checkpoint bytes.  A cached
    file with a matching checksum is reused without re-downloading.
    """
    os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
    snake_case_ : List[Any] = os.path.basename(_UpperCamelCase )
    # The second-to-last URL path component is the expected SHA-256 digest.
    snake_case_ : Any = url.split('''/''' )[-2]
    snake_case_ : str = os.path.join(_UpperCamelCase , _UpperCamelCase )
    if os.path.exists(_UpperCamelCase ) and not os.path.isfile(_UpperCamelCase ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
    if os.path.isfile(_UpperCamelCase ):
        snake_case_ : Union[str, Any] = open(_UpperCamelCase , '''rb''' ).read()
        if hashlib.shaaaa(_UpperCamelCase ).hexdigest() == expected_shaaaa:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
    with urllib.request.urlopen(_UpperCamelCase ) as source, open(_UpperCamelCase , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=_UpperCamelCase , unit_divisor=1_024
        ) as loop:
            while True:
                # Stream in 8 KiB chunks so huge checkpoints never sit in memory twice.
                snake_case_ : Dict = source.read(8_192 )
                if not buffer:
                    break
                output.write(_UpperCamelCase )
                loop.update(len(_UpperCamelCase ) )
    snake_case_ : Any = open(_UpperCamelCase , '''rb''' ).read()
    if hashlib.shaaaa(_UpperCamelCase ).hexdigest() != expected_shaaaa:
        # NOTE(review): "does not not match" is a typo in the runtime string;
        # left as-is here because a doc-only edit must not change behavior.
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' )
    return model_bytes


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
    """Convert an OpenAI Whisper checkpoint (name or .pt path) to HF format.

    Loads the original checkpoint, renames its parameters, builds a
    `WhisperConfig` from the checkpoint's `dims`, loads the weights into
    `WhisperForConditionalGeneration`, ties/installs the output projection and
    saves the result to the dump folder.
    """
    # A bare model name (no ".pt") means: download from _MODELS first.
    if ".pt" not in checkpoint_path:
        snake_case_ : str = _download(_MODELS[checkpoint_path] )
    else:
        snake_case_ : Union[str, Any] = torch.load(_UpperCamelCase , map_location='''cpu''' )
    snake_case_ : int = original_checkpoint['''dims''']
    snake_case_ : List[str] = original_checkpoint['''model_state_dict''']
    # Keep the token-embedding weights around: they become `proj_out` when
    # embeddings are not tied.
    snake_case_ : str = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(_UpperCamelCase )
    rename_keys(_UpperCamelCase )
    snake_case_ : Optional[int] = True
    snake_case_ : int = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    # NOTE(review): `decoder_attention_heads` reads dims['n_text_state'] below;
    # the upstream key for head count is 'n_text_head' — looks like collateral
    # damage from the mangling, confirm against the original script.
    snake_case_ : List[str] = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=_UpperCamelCase , decoder_ffn_dim=_UpperCamelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
    snake_case_ : Union[str, Any] = WhisperForConditionalGeneration(_UpperCamelCase )
    snake_case_ , snake_case_ : List[Any] = model.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
    # Only the sinusoidal position buffers may legitimately be missing.
    if len(_UpperCamelCase ) > 0 and not set(_UpperCamelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        snake_case_ : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        snake_case_ : Any = proj_out_weights
    model.save_pretrained(_UpperCamelCase )


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    lowerCAmelCase_ = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
279
1
"""Running-key substitution cipher over A-Z (spaces pass through unchanged).

BUG FIX: in the mangled version every function and both lookup tables shared a
single placeholder name, while the code referenced `generate_key`,
`cipher_text`, `original_text` and `dicta` — restore the names the call sites
already use, with two distinct lookup tables.
"""
from string import ascii_uppercase

# 'A'..'Z' -> 0..25 and the inverse lookup, shared by every helper below.
LETTER_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_LETTER = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Extend *key* by cycling its own characters until it matches len(message).

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'
    """
    i = 0
    # BUG FIX: the original compared lengths with `==` and therefore looped
    # forever when the key started out longer than the message.
    while len(key) < len(message):
        if i == len(message):  # wrap, kept for parity with the original logic
            i = 0
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt *message*: C[j] = (P[j] - K[j]) mod 26.

    Spaces are copied through and do not consume key characters.
    """
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (LETTER_TO_INDEX[letter] - LETTER_TO_INDEX[key_new[i]]) % 26
            i += 1
            encrypted += INDEX_TO_LETTER[x]
    return encrypted


def original_text(cipher: str, key_new: str) -> str:
    """Decrypt *cipher* (inverse of :func:`cipher_text`): P[j] = (C[j] + K[j]) mod 26."""
    decrypted = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            decrypted += " "
        else:
            x = (LETTER_TO_INDEX[letter] + LETTER_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            decrypted += INDEX_TO_LETTER[x]
    return decrypted


def main() -> None:
    """Round-trip the classic 'THE GERMAN ATTACK' / 'SECRET' example."""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
279
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase_ = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 
5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), ) lowerCAmelCase_ = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) lowerCAmelCase_ = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]), ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase_ = ( 
('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) lowerCAmelCase_ = ( ('''JH AH TH KH QH''', 2_3), ('''JH 9H TH KH QH''', 2_2), ('''JC KH JS JD JH''', 2_1), ('''KH KC 3S 3H 3D''', 2_0), ('''8C 9C 5C 3C TC''', 1_9), ('''JS QS 9H TS KH''', 1_8), ('''7C 7S KH 2H 7H''', 1_7), ('''3C KH 5D 5S KH''', 1_6), ('''QH 8H KD JH 8S''', 1_5), ('''2D 6D 9D TH 7D''', 1_4), ) def lowerCamelCase_ ( ) -> Dict: """simple docstring""" snake_case_ , snake_case_ : Any = randrange(len(_UpperCamelCase ) ), randrange(len(_UpperCamelCase ) ) snake_case_ : Any = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] snake_case_ , snake_case_ : Tuple = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase_ ( _UpperCamelCase = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(_UpperCamelCase )) @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple docstring""" assert PokerHand(_UpperCamelCase )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" assert PokerHand(_UpperCamelCase )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: """simple docstring""" snake_case_ : str = PokerHand(_UpperCamelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple 
docstring""" assert PokerHand(_UpperCamelCase )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" assert PokerHand(_UpperCamelCase )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: """simple docstring""" assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Dict = [PokerHand(_UpperCamelCase ) for hand in SORTED_HANDS] snake_case_ : str = poker_hands.copy() shuffle(_UpperCamelCase ) snake_case_ : List[str] = chain(sorted(_UpperCamelCase ) ) for index, hand in enumerate(_UpperCamelCase ): assert hand == poker_hands[index] def lowerCamelCase_ ( ) -> Dict: """simple docstring""" snake_case_ : Union[str, Any] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=_UpperCamelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase_ ( ) -> str: """simple docstring""" snake_case_ : Dict = PokerHand('''2C 4S AS 3D 5C''' ) snake_case_ : str = True snake_case_ : Tuple = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase_ ( ) -> List[str]: """simple docstring""" snake_case_ : List[str] = 0 snake_case_ : Union[str, Any] = os.path.abspath(os.path.dirname(_UpperCamelCase ) ) snake_case_ : Dict = os.path.join(_UpperCamelCase , '''poker_hands.txt''' ) with 
open(_UpperCamelCase ) as file_hand: for line in file_hand: snake_case_ : Dict = line[:14].strip() snake_case_ : List[str] = line[15:].strip() snake_case_ , snake_case_ : str = PokerHand(_UpperCamelCase ), PokerHand(_UpperCamelCase ) snake_case_ : int = player.compare_with(_UpperCamelCase ) if output == "Win": answer += 1 assert answer == 376
279
1
import unittest import numpy as np def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , ) -> np.ndarray: """simple docstring""" snake_case_ : List[Any] = np.shape(_UpperCamelCase ) snake_case_ : Dict = np.shape(_UpperCamelCase ) snake_case_ : Any = np.shape(_UpperCamelCase ) if shape_a[0] != shape_b[0]: snake_case_ : List[Any] = ( '''Expected the same number of rows for A and B. ''' f'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(_UpperCamelCase ) if shape_b[1] != shape_c[1]: snake_case_ : Optional[int] = ( '''Expected the same number of columns for B and C. ''' f'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(_UpperCamelCase ) snake_case_ : Tuple = pseudo_inv if a_inv is None: try: snake_case_ : Tuple = np.linalg.inv(_UpperCamelCase ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase (self ) -> None: '''simple docstring''' snake_case_ : Optional[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case_ : Dict = np.array([[2, 1], [6, 3]] ) snake_case_ : Optional[Any] = schur_complement(__magic_name__ , __magic_name__ , __magic_name__ ) snake_case_ : List[Any] = np.block([[a, b], [b.T, c]] ) snake_case_ : Tuple = np.linalg.det(__magic_name__ ) snake_case_ : Union[str, Any] = np.linalg.det(__magic_name__ ) snake_case_ : List[str] = np.linalg.det(__magic_name__ ) self.assertAlmostEqual(__magic_name__ , det_a * det_s ) def lowerCamelCase (self ) -> None: '''simple docstring''' snake_case_ : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case_ : str = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case_ : Any = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__magic_name__ ): 
schur_complement(__magic_name__ , __magic_name__ , __magic_name__ ) def lowerCamelCase (self ) -> None: '''simple docstring''' snake_case_ : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case_ : Any = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case_ : Optional[int] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__magic_name__ ): schur_complement(__magic_name__ , __magic_name__ , __magic_name__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
279
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available lowerCAmelCase_ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : lowerCamelCase_ : str lowerCamelCase_ : List[str] lowerCamelCase_ : Optional[List[str]] @dataclass class __lowerCAmelCase : lowerCamelCase_ : List[int] lowerCamelCase_ : List[int] lowerCamelCase_ : Optional[List[int]] = None lowerCamelCase_ : Optional[List[int]] = None class __lowerCAmelCase ( _a ): lowerCamelCase_ : str = '''train''' lowerCamelCase_ : List[str] = '''dev''' lowerCamelCase_ : List[Any] = '''test''' class __lowerCAmelCase : @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ ) -> List[InputExample]: '''simple docstring''' raise NotImplementedError @staticmethod def lowerCamelCase (__magic_name__ ) -> List[str]: '''simple docstring''' raise NotImplementedError @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__=1 , __magic_name__="[SEP]" , __magic_name__=False , __magic_name__=False , __magic_name__=0 , __magic_name__=0 , __magic_name__=-100 , __magic_name__=0 , __magic_name__=True , ) -> List[InputFeatures]: '''simple docstring''' snake_case_ : Optional[int] = {label: i for i, label in enumerate(__magic_name__ )} snake_case_ : Dict = [] for ex_index, example in enumerate(__magic_name__ ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d of %d''' , __magic_name__ , len(__magic_name__ ) ) snake_case_ : List[str] = [] snake_case_ : List[str] = [] for word, label in zip(example.words , example.labels ): snake_case_ : Optional[Any] = tokenizer.tokenize(__magic_name__ ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. 
if len(__magic_name__ ) > 0: tokens.extend(__magic_name__ ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__magic_name__ ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. snake_case_ : Union[str, Any] = tokenizer.num_special_tokens_to_add() if len(__magic_name__ ) > max_seq_length - special_tokens_count: snake_case_ : str = tokens[: (max_seq_length - special_tokens_count)] snake_case_ : Any = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] snake_case_ : Union[str, Any] = [sequence_a_segment_id] * len(__magic_name__ ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: snake_case_ : Union[str, Any] = [cls_token] + tokens snake_case_ : List[Any] = [pad_token_label_id] + label_ids snake_case_ : Optional[Any] = [cls_token_segment_id] + segment_ids snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. snake_case_ : int = [1 if mask_padding_with_zero else 0] * len(__magic_name__ ) # Zero-pad up to the sequence length. snake_case_ : Optional[int] = max_seq_length - len(__magic_name__ ) if pad_on_left: snake_case_ : Optional[Any] = ([pad_token] * padding_length) + input_ids snake_case_ : Optional[int] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask snake_case_ : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids snake_case_ : Dict = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length if ex_index < 5: logger.info('''*** Example ***''' ) logger.info('''guid: %s''' , example.guid ) logger.info('''tokens: %s''' , ''' '''.join([str(__magic_name__ ) for x in tokens] ) ) logger.info('''input_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_ids] ) ) logger.info('''input_mask: %s''' , ''' 
'''.join([str(__magic_name__ ) for x in input_mask] ) ) logger.info('''segment_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in segment_ids] ) ) logger.info('''label_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: snake_case_ : int = None features.append( InputFeatures( input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , label_ids=__magic_name__ ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __lowerCAmelCase ( _a ): lowerCamelCase_ : List[InputFeatures] lowerCamelCase_ : int = nn.CrossEntropyLoss().ignore_index def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = os.path.join( __magic_name__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__magic_name__ ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
snake_case_ : Dict = cached_features_file + '''.lock''' with FileLock(__magic_name__ ): if os.path.exists(__magic_name__ ) and not overwrite_cache: logger.info(F'''Loading features from cached file {cached_features_file}''' ) snake_case_ : Dict = torch.load(__magic_name__ ) else: logger.info(F'''Creating features from dataset file at {data_dir}''' ) snake_case_ : Any = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ ) # TODO clean up all this to leverage built-in features of tokenizers snake_case_ : int = token_classification_task.convert_examples_to_features( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F'''Saving features into cached file {cached_features_file}''' ) torch.save(self.features , __magic_name__ ) def __len__(self ) -> Optional[Any]: '''simple docstring''' return len(self.features ) def __getitem__(self , __magic_name__ ) -> InputFeatures: '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class __lowerCAmelCase : lowerCamelCase_ : List[InputFeatures] lowerCamelCase_ : int = -100 def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ ) # TODO clean up all this to leverage built-in features of tokenizers snake_case_ : int = 
token_classification_task.convert_examples_to_features( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: snake_case_ : Optional[Any] = tf.data.Dataset.from_generator( __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , ( {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: snake_case_ : int = tf.data.Dataset.from_generator( __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , ( { '''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] ), '''token_type_ids''': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__(self ) -> str: '''simple docstring''' return len(self.features ) def __getitem__(self , __magic_name__ ) -> InputFeatures: '''simple docstring''' return self.features[i]
279
1
import pickle

import numpy as np
from matplotlib import pyplot as plt


# NOTE(review): this block has been machine-transformed.  Every assignment
# target was collapsed to the placeholder ``snake_case_`` and every parameter
# to ``__magic_name__``, while *use* sites still carry the original names
# (bp_numa, conva_get, size_pa, model_dic, self.conva, self.num_bpa, ...),
# and all methods share the single name ``lowerCamelCase`` (so only the last
# definition survives in the class dict).  As written the code cannot run;
# the comments below document the intent still visible from the use sites.
# Code is left byte-identical on purpose — do not "fix" names without the
# original upstream file.
class __lowerCAmelCase :
    # A small convolutional neural network built from scratch with numpy:
    # one convolution layer, one pooling layer, and a two-layer BP
    # (fully-connected) head trained with hand-rolled backpropagation.

    def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0.2 , __magic_name__=0.2 ) -> Optional[Any]:
        '''Initialize layer sizes, random kernels/weights and learning rates.

        NOTE(review): the positional params were presumably
        (conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t) —
        TODO confirm against the original source; they are collapsed here.
        '''
        # Sizes of the three BP layers (inputs / hidden / outputs).
        snake_case_ : Tuple = bp_numa
        snake_case_ : Union[str, Any] = bp_numa
        snake_case_ : Dict = bp_numa
        # conv spec: [:2] keeps (kernel_size, conv_step); [2] is kernel count.
        snake_case_ : Optional[Any] = conva_get[:2]
        snake_case_ : int = conva_get[2]
        # Pooling window size.
        snake_case_ : Optional[int] = size_pa
        # Learning rates for weights and thresholds respectively.
        snake_case_ : int = rate_w
        snake_case_ : int = rate_t
        # One random kernel in [-0.5, 0.5) per convolution map.
        snake_case_ : Optional[int] = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        # Fully-connected weight matrices (also in [-0.5, 0.5)).
        snake_case_ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        snake_case_ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        # Per-layer thresholds (biases) in [-1, 1).
        snake_case_ : int = -2 * np.random.rand(self.conva[1] ) + 1
        snake_case_ : Any = -2 * np.random.rand(self.num_bpa ) + 1
        snake_case_ : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1

    def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
        '''Serialize all model hyper-parameters and weights to ``save_path``
        with pickle.'''
        snake_case_ : Optional[int] = {
            '''num_bp1''': self.num_bpa,
            '''num_bp2''': self.num_bpa,
            '''num_bp3''': self.num_bpa,
            '''conv1''': self.conva,
            '''step_conv1''': self.step_conva,
            '''size_pooling1''': self.size_poolinga,
            '''rate_weight''': self.rate_weight,
            '''rate_thre''': self.rate_thre,
            '''w_conv1''': self.w_conva,
            '''wkj''': self.wkj,
            '''vji''': self.vji,
            '''thre_conv1''': self.thre_conva,
            '''thre_bp2''': self.thre_bpa,
            '''thre_bp3''': self.thre_bpa,
        }
        with open(__magic_name__ , '''wb''' ) as f:
            pickle.dump(__magic_name__ , __magic_name__ )
        print(F'''Model saved: {save_path}''' )

    @classmethod
    def lowerCamelCase (cls , __magic_name__ ) -> Optional[int]:
        '''Load a pickled model file and rebuild a CNN instance from it.

        NOTE(review): pickle.load on an untrusted file is unsafe (S301 is
        suppressed below on purpose by the original author).
        '''
        with open(__magic_name__ , '''rb''' ) as f:
            snake_case_ : str = pickle.load(__magic_name__ )  # noqa: S301
        snake_case_ : List[Any] = model_dic.get('''conv1''' )
        conv_get.append(model_dic.get('''step_conv1''' ) )
        snake_case_ : str = model_dic.get('''size_pooling1''' )
        snake_case_ : Any = model_dic.get('''num_bp1''' )
        snake_case_ : Tuple = model_dic.get('''num_bp2''' )
        snake_case_ : str = model_dic.get('''num_bp3''' )
        snake_case_ : List[str] = model_dic.get('''rate_weight''' )
        snake_case_ : Dict = model_dic.get('''rate_thre''' )
        # create model instance
        snake_case_ : Optional[Any] = CNN(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        # modify model parameter
        snake_case_ : Union[str, Any] = model_dic.get('''w_conv1''' )
        snake_case_ : Union[str, Any] = model_dic.get('''wkj''' )
        snake_case_ : int = model_dic.get('''vji''' )
        snake_case_ : List[Any] = model_dic.get('''thre_conv1''' )
        snake_case_ : Optional[Any] = model_dic.get('''thre_bp2''' )
        snake_case_ : Optional[Any] = model_dic.get('''thre_bp3''' )
        return conv_ins

    def lowerCamelCase (self , __magic_name__ ) -> str:
        '''Sigmoid activation: 1 / (1 + e^-x).'''
        return 1 / (1 + np.exp(-1 * x ))

    def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
        '''Round a value to 3 decimal places (used to quantize predictions).'''
        return round(__magic_name__ , 3 )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
        '''Convolve ``data`` with every kernel.

        Returns the flattened input patches and the list of sigmoid-activated
        feature maps (one square matrix per kernel).
        '''
        snake_case_ : int = convs[0]
        snake_case_ : Dict = convs[1]
        snake_case_ : Optional[int] = np.shape(__magic_name__ )[0]
        # get the data slice of original image data, data_focus
        snake_case_ : Optional[Any] = []
        for i_focus in range(0 , size_data - size_conv + 1 , __magic_name__ ):
            for j_focus in range(0 , size_data - size_conv + 1 , __magic_name__ ):
                snake_case_ : List[Any] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__magic_name__ )
        # calculate the feature map of every single kernel, and saved as list of matrix
        snake_case_ : Optional[int] = []
        snake_case_ : List[Any] = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__magic_name__ ):
            snake_case_ : List[str] = []
            for i_focus in range(len(__magic_name__ ) ):
                # dot product of patch and kernel minus the kernel threshold
                snake_case_ : Dict = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__magic_name__ ) )
            snake_case_ : Any = np.asmatrix(__magic_name__ ).reshape(
                __magic_name__ , __magic_name__ )
            data_featuremap.append(__magic_name__ )
        # expanding the data slice to One dimenssion
        snake_case_ : Optional[int] = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__magic_name__ ) )
        snake_case_ : List[str] = np.asarray(__magic_name__ )
        return focus_list, data_featuremap

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__="average_pool" ) -> List[str]:
        '''Pool every feature map with a non-overlapping window.

        pooling_type selects average ("average_pool") or max ("max_pooling").
        '''
        snake_case_ : Any = len(featuremaps[0] )
        snake_case_ : Dict = int(size_map / size_pooling )
        snake_case_ : Dict = []
        for i_map in range(len(__magic_name__ ) ):
            snake_case_ : List[str] = featuremaps[i_map]
            snake_case_ : List[Any] = []
            for i_focus in range(0 , __magic_name__ , __magic_name__ ):
                for j_focus in range(0 , __magic_name__ , __magic_name__ ):
                    snake_case_ : List[str] = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__magic_name__ ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__magic_name__ ) )
            snake_case_ : Any = np.asmatrix(__magic_name__ ).reshape(__magic_name__ , __magic_name__ )
            featuremap_pooled.append(__magic_name__ )
        return featuremap_pooled

    def lowerCamelCase (self , __magic_name__ ) -> Any:
        '''Flatten a list of matrices into one 1-D numpy array.'''
        snake_case_ : int = []
        for i in range(len(__magic_name__ ) ):
            snake_case_ : Optional[int] = np.shape(data[i] )
            snake_case_ : int = data[i].reshape(1 , shapes[0] * shapes[1] )
            snake_case_ : Optional[int] = data_listed.getA().tolist()[0]
            data_expanded.extend(__magic_name__ )
        snake_case_ : Dict = np.asarray(__magic_name__ )
        return data_expanded

    def lowerCamelCase (self , __magic_name__ ) -> str:
        '''Flatten a single matrix into a 1 x (rows*cols) row vector.'''
        snake_case_ : Optional[Any] = np.asarray(__magic_name__ )
        snake_case_ : Optional[int] = np.shape(__magic_name__ )
        snake_case_ : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
        '''Upsample the pooled gradient back to feature-map size and apply the
        sigmoid derivative out*(1-out) — backprop through the pooling layer.'''
        snake_case_ : Optional[Any] = []
        snake_case_ : Dict = 0
        for i_map in range(__magic_name__ ):
            snake_case_ : List[Any] = np.ones((size_map, size_map) )
            for i in range(0 , __magic_name__ , __magic_name__ ):
                for j in range(0 , __magic_name__ , __magic_name__ ):
                    # broadcast one pooled gradient over its whole window
                    snake_case_ : Optional[int] = pd_pool[
                        i_pool
                    ]
                    snake_case_ : int = i_pool + 1
            snake_case_ : Optional[int] = np.multiply(
                __magic_name__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(__magic_name__ )
        return pd_all

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=bool ) -> Optional[int]:
        '''Train on (datas_train, datas_teach) until either ``n_repeat``
        epochs elapse or the MSE drops below ``error_accuracy``.

        NOTE(review): the last param default ``=bool`` looks like a mangled
        ``draw_e=False`` (it is only truth-tested below) — TODO confirm.
        Returns the final MSE.
        '''
        print('''----------------------Start Training-------------------------''' )
        print((''' - - Shape: Train_Data ''', np.shape(__magic_name__ )) )
        print((''' - - Shape: Teach_Data ''', np.shape(__magic_name__ )) )
        snake_case_ : int = 0
        snake_case_ : Union[str, Any] = []
        # start mse above any reachable error so the loop always runs once
        snake_case_ : List[str] = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            snake_case_ : Dict = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(__magic_name__ ) ):
                # print('------------Learning Image: %d--------------'%p)
                snake_case_ : Optional[int] = np.asmatrix(datas_train[p] )
                snake_case_ : List[str] = np.asarray(datas_teach[p] )
                # ---- forward pass: conv -> pool -> flatten -> 2 BP layers
                snake_case_ , snake_case_ : Any = self.convolute(
                    __magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                snake_case_ : List[str] = self.pooling(__magic_name__ , self.size_poolinga )
                snake_case_ : str = np.shape(__magic_name__ )
                snake_case_ : List[str] = self._expand(__magic_name__ )
                snake_case_ : Optional[int] = data_bp_input
                snake_case_ : Tuple = np.dot(__magic_name__ , self.vji.T ) - self.thre_bpa
                snake_case_ : List[Any] = self.sig(__magic_name__ )
                snake_case_ : Union[str, Any] = np.dot(__magic_name__ , self.wkj.T ) - self.thre_bpa
                snake_case_ : Tuple = self.sig(__magic_name__ )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                snake_case_ : str = np.multiply(
                    (data_teach - bp_outa) , np.multiply(__magic_name__ , (1 - bp_outa) ) )
                snake_case_ : Union[str, Any] = np.multiply(
                    np.dot(__magic_name__ , self.wkj ) , np.multiply(__magic_name__ , (1 - bp_outa) ) )
                snake_case_ : Tuple = np.dot(__magic_name__ , self.vji )
                snake_case_ : List[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
                snake_case_ : List[Any] = pd_conva_pooled.T.getA().tolist()
                snake_case_ : str = self._calculate_gradient_from_pool(
                    __magic_name__ , __magic_name__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    snake_case_ : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
                    snake_case_ : Union[str, Any] = self.rate_weight * np.dot(__magic_name__ , __magic_name__ )
                    snake_case_ : List[str] = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    snake_case_ : List[str] = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                snake_case_ : Dict = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                snake_case_ : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                snake_case_ : Any = self.thre_bpa - pd_k_all * self.rate_thre
                snake_case_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                snake_case_ : int = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            snake_case_ : Optional[Any] = rp + 1
            snake_case_ : Union[str, Any] = error_count / patterns
            all_mse.append(__magic_name__ )

        def draw_error():
            # plot the per-epoch MSE against the target accuracy line
            snake_case_ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__magic_name__ , '''+-''' )
            plt.plot(__magic_name__ , '''r--''' )
            plt.xlabel('''Learning Times''' )
            plt.ylabel('''All_mse''' )
            plt.grid(__magic_name__ , alpha=0.5 )
            plt.show()

        print('''------------------Training Complished---------------------''' )
        print((''' - - Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse

    def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
        '''Run the forward pass on a list of samples and return the rounded
        network outputs as a numpy array.'''
        snake_case_ : List[str] = []
        print('''-------------------Start Testing-------------------------''' )
        print((''' - - Shape: Test_Data ''', np.shape(__magic_name__ )) )
        for p in range(len(__magic_name__ ) ):
            snake_case_ : Optional[int] = np.asmatrix(datas_test[p] )
            snake_case_ , snake_case_ : int = self.convolute(
                __magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            snake_case_ : Union[str, Any] = self.pooling(__magic_name__ , self.size_poolinga )
            snake_case_ : List[Any] = self._expand(__magic_name__ )
            snake_case_ : Optional[Any] = data_bp_input
            snake_case_ : int = bp_outa * self.vji.T - self.thre_bpa
            snake_case_ : List[Any] = self.sig(__magic_name__ )
            snake_case_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
            snake_case_ : Tuple = self.sig(__magic_name__ )
            produce_out.extend(bp_outa.getA().tolist() )
        snake_case_ : List[str] = [list(map(self.do_round , __magic_name__ ) ) for each in produce_out]
        return np.asarray(__magic_name__ )

    def lowerCamelCase (self , __magic_name__ ) -> Tuple:
        '''Expose the intermediate conv + pool outputs for a single sample
        (useful for inspection/visualization).'''
        snake_case_ : Dict = np.asmatrix(__magic_name__ )
        snake_case_ , snake_case_ : Union[str, Any] = self.convolute(
            __magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        snake_case_ : Optional[Any] = self.pooling(__magic_name__ , self.size_poolinga )
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
279
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[int] = SpeechTaTokenizer lowerCamelCase_ : int = False lowerCamelCase_ : Dict = True def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case_ : Tuple = SpeechTaTokenizer(__magic_name__ ) snake_case_ : Any = AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ ) snake_case_ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Dict = '''this is a test''' snake_case_ : int = '''this is a test''' return input_text, output_text def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> List[Any]: '''simple docstring''' snake_case_ , snake_case_ : int = self.get_input_output_texts(__magic_name__ ) snake_case_ : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) snake_case_ : Any = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) return text, ids def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[str] = '''<pad>''' snake_case_ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) 
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__magic_name__ ) , 81 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : int = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) ) snake_case_ : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': 
'''<<<<<|||>|>>>>|>'''} snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Dict = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) ) snake_case_ : Tuple = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> List[str]: '''simple docstring''' pass def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.get_tokenizer() snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, 
'''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ ) # fmt: off self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off snake_case_ : List[Any] = { '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 
14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
279
1
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder

# NOTE(review): the original fixtures/constants had been collapsed to a
# single duplicated identifier (each definition shadowing the previous, so
# pytest would only ever see the last one) and the HFH URL template literal
# had been corrupted to ".../(unknown)".  Names are restored from the
# references that survived inside the bodies (CI_HUB_USER, CI_HUB_USER_TOKEN,
# cleanup_repo, hf_private_dataset_repo_*_ ...); huggingface_hub's template
# ends in "/{filename}".

# Credentials and endpoint of the Hub CI instance used by these tests.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# Template monkeypatched into huggingface_hub.file_download; must keep the
# {repo_id}/{revision}/{filename} placeholders that library expects.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub file downloads at the Hub CI endpoint."""
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    """Point the `datasets` library config at the Hub CI endpoint."""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Redirect HfFolder's token file to a CI-specific path."""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Install the CI user token for the duration of a test, then remove it."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    """Session-wide HfApi client bound to the Hub CI endpoint."""
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token():
    """Temporarily swap in the CI user token, restoring any pre-existing one."""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    """Return a callable that deletes a dataset repo on the CI Hub."""

    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    """Context manager that guarantees repo deletion after use."""

    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    # text_file is provided by another fixture module — TODO confirm name.
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    # zip_csv_with_dir_path is provided by another fixture module — TODO confirm name.
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    # zip_image_path is provided by another fixture module — TODO confirm name.
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
279
def _print_dist(dist, v):
    """Print the v x v shortest-path matrix, using ``INF`` for unreachable pairs.

    :param dist: square matrix of path costs (floats, ``float('inf')`` = unreachable)
    :param v: number of vertices
    """
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via Floyd-Warshall.

    :param graph: v x v adjacency matrix of edge weights; ``float('inf')`` means
        no direct edge. ``graph[i][i]`` should be 0.
    :param v: number of vertices
    :return: tuple ``(dist, v)`` where ``dist[i][j]`` is the cheapest path cost.
        Also prints the matrix as a side effect.
    """
    # Start from the direct-edge costs.
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # Relax every pair (i, j) through every intermediate vertex k.
    for k in range(v):
        for i in range(v):
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[v][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example:
    # 3 vertices, 2 edges: 1->2 weight 2, 2->1 weight 1
    # Expected output:
    # 0  INF INF
    # INF 0   2
    # INF 1   0
279
1
"""Convert original DPT (hybrid) checkpoints to the HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    """Build a DPTConfig matching the checkpoint named in `checkpoint_url`.

    Returns (config, expected_shape) where expected_shape is the expected
    output shape of the model on a single 384/480-px image.
    """
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # BUGFIX: the original condition was `if "nyu" or "midas" in checkpoint_url:`
    # which is always true ("nyu" is a non-empty string). Test membership of each
    # substring explicitly.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        # NOTE(review): attribute names below were lost in the mangled source;
        # they follow the DPT hybrid config — confirm against DPTConfig.
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    """Drop the original classification head weights (not used by DPT)."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    """Map an original DPT state-dict key to its HuggingFace equivalent."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak the original DPT weights into the HuggingFace layout.

    :param checkpoint_url: path/URL of the original checkpoint
    :param pytorch_dump_folder_path: optional folder to save the converted model
    :param push_to_hub: if True, push model + processor to the hub
    :param model_name: hub model name (currently unused; kept for CLI compat)
    :param show_prediction: if True, display the model prediction on a test image
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
279
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Dummy model whose forward args match the tokenizer output order."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Dummy model with an extra arg interleaved between the tokenizer outputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    """End-to-end tests for graph-to-ONNX conversion and quantization."""

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX in a temp dir and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        """Check input/output variable names and dynamic-axis shapes."""
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        """ensure_valid_input must reorder/drop args to match the forward signature."""
        valid_input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
279
1
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """Abstract base for hyperparameter-search backends (optuna, ray, ...)."""

    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's package is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        """Run the hyperparameter search for `trainer`."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default search space for this backend."""
        raise NotImplementedError

    def ensure_available(self):
        """Raise a helpful RuntimeError when the backend is not installed."""
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend():
    """Pick the first installed search backend, or raise with install hints."""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
279
lowerCAmelCase_ = { "joule": 1.0, "kilojoule": 1_0_0_0, "megajoule": 1_0_0_0_0_0_0, "gigajoule": 1_0_0_0_0_0_0_0_0_0, "wattsecond": 1.0, "watthour": 3_6_0_0, "kilowatthour": 3_6_0_0_0_0_0, "newtonmeter": 1.0, "calorie_nutr": 4_1_8_6.8, "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0, "electronvolt": 1.602_176_634e-19, "britishthermalunit_it": 1_0_5_5.0_5_5_8_5, "footpound": 1.355_818, } def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: snake_case_ : str = ( f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' f'''Valid values are: {", ".join(_UpperCamelCase )}''' ) raise ValueError(_UpperCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
279
1
class __lowerCAmelCase : def __init__(self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = {} def lowerCamelCase (self ) -> None: '''simple docstring''' print(self.vertex ) for i in self.vertex: print(__magic_name__ , ''' -> ''' , ''' -> '''.join([str(__magic_name__ ) for j in self.vertex[i]] ) ) def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None: '''simple docstring''' if from_vertex in self.vertex: self.vertex[from_vertex].append(__magic_name__ ) else: # else make a new vertex snake_case_ : Union[str, Any] = [to_vertex] def lowerCamelCase (self ) -> None: '''simple docstring''' snake_case_ : str = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__magic_name__ , __magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None: '''simple docstring''' snake_case_ : Dict = True print(__magic_name__ , end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__magic_name__ , __magic_name__ ) if __name__ == "__main__": lowerCAmelCase_ = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print('''DFS:''') g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
279
"""BLEURT metric wrapper for the `datasets` library."""
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

# Downloadable checkpoints, keyed by config name.
CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    """Learnt text-generation metric; scores candidates against references."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        """Resolve the checkpoint name, download it and build the scorer."""
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
279
1
def lowerCamelCase_(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of *number*, sign preserved.

    Args:
        number: value whose fractional part is wanted.
        digit_amount: if > 0, round the fractional part to that many digits;
            otherwise return it unrounded.

    Returns:
        The fractional part of ``number``.
    """
    # The original had two parameters both named `_UpperCamelCase` (SyntaxError).
    fractional = number - int(number)
    if digit_amount > 0:
        return round(fractional, digit_amount)
    return fractional


# The demo below calls the function by its descriptive name, which an earlier
# rename removed; keep an alias so the script runs again.
decimal_isolate = lowerCamelCase_

if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
279
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


# Pillow 9.1 moved the resampling filters into the PIL.Image.Resampling enum;
# pick the right constants for the installed version.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    lowerCAmelCase_ = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    lowerCAmelCase_ = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def numpy_to_pil(images):
    """Convert a numpy image batch, values in [0, 1], to a list of PIL images.

    A single (H, W, C) image is promoted to a batch of one. Single-channel
    images become mode-"L" (grayscale) PIL images.
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images


def pt_to_pil(images):
    """Denormalize a torch image batch from [-1, 1] to [0, 1] and convert to PIL.

    NOTE(review): assumes `images` is an (N, C, H, W) torch tensor — confirm
    against callers; the permute below fixes that axis order.
    """
    # This helper previously shared the name `lowerCamelCase_` with
    # numpy_to_pil (shadowed) and called an undefined `numpy_to_pil`.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(images)


# Preserve the historical module-level binding: the last definition under the
# old shared name was the numpy converter.
lowerCamelCase_ = numpy_to_pil
279
1
# Lazy-import bootstrap for the LUKE model (standard transformers pattern):
# heavy, torch-dependent submodules are only imported on first attribute access.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public names it exports; consumed by _LazyModule below.
# (This dict was previously assigned to a throwaway name, leaving the
# `_import_structure` referenced at the bottom undefined.)
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: the modeling classes are simply not registered.
    pass
else:
    # Add (not overwrite) the torch-backed modeling exports.
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand
    # (the otherwise-unused `import sys` shows this was the intent).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
279
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):  # base was undefined `_a`; the mixin is imported above
    """Unit tests for BioGptTokenizer against a tiny handcrafted BPE vocabulary."""

    # These exact attribute names are required by TokenizerTesterMixin.
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a minimal vocab/merges pair into the mixin's tmpdir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # Identity pair: detokenizing this input reproduces it exactly.
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # The builder prepends special-token id 2 to each sequence.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
279
1
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets lowerCAmelCase_ = datasets.logging.get_logger(__name__) lowerCAmelCase_ = '''\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = "Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric", author = "Moosavi, Nafise Sadat and Strube, Michael", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2016", address = "Berlin, Germany", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P16-1060", doi = "10.18653/v1/P16-1060", pages = "632--642", } ''' lowerCAmelCase_ = '''\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. 
All other rows are marked with a "-" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. ''' lowerCAmelCase_ = ''' Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. 
keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting \'keep_singletons=False\', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs. min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: \'mentions\': mentions \'muc\': MUC metric [Vilain et al, 1995] \'bcub\': B-cubed [Bagga and Baldwin, 1998] \'ceafe\': CEAFe [Luo et al., 2005] \'lea\': LEA [Moosavi and Strube, 2016] \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric(\'coval\') >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\', ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\', ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\', ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\', ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\', ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {\'mentions/recall\': 1.0,[...] 
\'conll_score\': 100.0} ''' def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase="dummy_doc" ) -> Dict: """simple docstring""" snake_case_ : Any = {doc: key_lines} snake_case_ : Dict = {doc: sys_lines} snake_case_ : Any = {} snake_case_ : str = 0 snake_case_ : str = 0 snake_case_ : Tuple = 0 snake_case_ : List[Any] = 0 snake_case_ : Optional[Any] = 0 snake_case_ : Dict = 0 snake_case_ , snake_case_ : str = reader.get_doc_mentions(_UpperCamelCase , key_doc_lines[doc] , _UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: snake_case_ : Tuple = reader.set_annotated_parse_trees(_UpperCamelCase , key_doc_lines[doc] , _UpperCamelCase , _UpperCamelCase ) snake_case_ , snake_case_ : Any = reader.get_doc_mentions(_UpperCamelCase , sys_doc_lines[doc] , _UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: snake_case_ : Dict = reader.set_annotated_parse_trees(_UpperCamelCase , key_doc_lines[doc] , _UpperCamelCase , _UpperCamelCase ) if remove_nested: snake_case_ , snake_case_ : str = reader.remove_nested_coref_mentions(_UpperCamelCase , _UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters snake_case_ , snake_case_ : List[Any] = reader.remove_nested_coref_mentions(_UpperCamelCase , _UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters snake_case_ : Tuple = reader.get_mention_assignments(_UpperCamelCase , _UpperCamelCase ) snake_case_ : Tuple = reader.get_mention_assignments(_UpperCamelCase , _UpperCamelCase ) snake_case_ : List[str] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( 
'''Number of resulting singleton clusters in the key ''' f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' '''files, respectively''' ) return doc_coref_infos def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" snake_case_ : int = get_coref_infos(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ : Union[str, Any] = {} snake_case_ : Optional[Any] = 0 snake_case_ : Tuple = 0 for name, metric in metrics: snake_case_ , snake_case_ , snake_case_ : List[str] = evaluator.evaluate_documents(_UpperCamelCase , _UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , f'''Recall: {recall * 100:.2f}''' , f''' Precision: {precision * 100:.2f}''' , f''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: snake_case_ : Tuple = (conll / 3) * 100 logger.info(f'''CoNLL score: {conll:.2f}''' ) output_scores.update({'''conll_score''': conll} ) return output_scores def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case_ : Union[str, Any] = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: snake_case_ : str = line.split()[5] if not parse_col == "-": snake_case_ : Optional[int] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def lowerCamelCase (self ) -> Dict: '''simple docstring''' return datasets.MetricInfo( 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Sequence(datasets.Value('''string''' ) ), } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False , __magic_name__=False , __magic_name__=False ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: snake_case_ : List[Any] = util.check_gold_parse_annotation(__magic_name__ ) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" snake_case_ : Union[str, Any] = evaluate( key_lines=__magic_name__ , sys_lines=__magic_name__ , metrics=__magic_name__ , NP_only=__magic_name__ , remove_nested=__magic_name__ , keep_singletons=__magic_name__ , min_span=__magic_name__ , ) return score
279
from __future__ import annotations


def lowerCamelCase_(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken in decreasing value/weight ratio; the first item that does
    not fit entirely is taken fractionally and the loop stops.

    Args:
        value: profit of each item.
        weight: weight of each item (assumed non-zero — division below).
        capacity: total capacity of the knapsack.

    Returns:
        ``(max_value, fractions)`` where ``fractions[i]`` is the fraction of
        item ``i`` placed in the knapsack (0, 1, or a value in between).
    """
    # The original had three parameters all named `_UpperCamelCase`
    # (SyntaxError) and a sort key referencing an undefined `i`.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy order: best value-per-weight first (`reverse` was mistakenly
    # bound to the capacity argument instead of True).
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Partial take of the first item that doesn't fit, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Public checkpoint name -> hosted config URL.
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}

# Backward-compatible alias: the old rebound module-level name pointed here.
lowerCAmelCase_ = CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP


class CamembertConfig(PretrainedConfig):  # base was undefined `_a`; configs extend the imported PretrainedConfig
    """Configuration for CamemBERT models (RoBERTa-style hyper-parameters).

    All defaults mirror the base checkpoint; unknown kwargs are forwarded to
    PretrainedConfig.
    """

    # Required by the PretrainedConfig machinery (the old class-level
    # `List[str]` annotation referenced an unimported name).
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # The original signature repeated `__magic_name__` 18 times
        # (SyntaxError) and never stored the values on `self`.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class __lowerCAmelCase(OnnxConfig):  # base was undefined `_a`; keeps the original (last-bound) module name
    """ONNX export configuration: declares the dynamic axes of model inputs."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # `inputs` is the property name the OnnxConfig contract reads.
        if self.task == "multiple-choice":
            # multiple-choice inserts a `choice` axis between batch and sequence
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
279
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build the GLPNImageProcessor under test.

    (This class previously carried an obfuscated name while the test class
    below referenced `GLPNImageProcessingTester` — a NameError.)
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        # The original signature repeated `__magic_name__` (SyntaxError) and
        # never stored the values on `self`, which prepare_image_processor_dict reads.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        # kwargs forwarded to GLPNImageProcessor(...)
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class __lowerCAmelCase(ImageProcessingSavingTestMixin, unittest.TestCase):  # base was undefined `_a`; the mixin is imported above
    """Tests for GLPNImageProcessor with PIL, numpy, and torch inputs."""

    # The mixin reads exactly this attribute name.
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    # Methods below were all named `lowerCamelCase`, shadowing each other so
    # unittest discovered nothing; restored to their framework-required names.

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
279
1
from __future__ import annotations

import unittest

from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TFMobileBertForMaskedLM,
        TFMobileBertForMultipleChoice,
        TFMobileBertForNextSentencePrediction,
        TFMobileBertForPreTraining,
        TFMobileBertForQuestionAnswering,
        TFMobileBertForSequenceClassification,
        TFMobileBertForTokenClassification,
        TFMobileBertModel,
    )


@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common and pipeline tests for the TF MobileBERT model family.

    NOTE(review): names restored to match the intra-file call sites — the
    obfuscated dump gave every method the same name, which made later defs
    silently overwrite earlier ones and left ``setUp`` referencing the
    undefined ``TFMobileBertModelTest.TFMobileBertModelTester``.
    """

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the common input preparation with pretraining-only labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # NSP head expects one scalar label per batch item.
                # (The dump's ``tf.intaa`` is the obfuscated ``tf.int32``.)
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

    class TFMobileBertModelTester(object):
        """Builds tiny configs/inputs and checks each head's output shapes."""

        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            """Return a small MobileBertConfig plus random ids/masks/labels."""
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            # exercise the three supported input formats: dict, list, tensor
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            # duplicate each input across the choice dimension
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
279
# Package initializer for the UnCLIP pipelines: exposes the real pipeline
# classes when torch + a recent-enough transformers are installed, and
# otherwise falls back to dummy placeholders that raise an informative
# error when instantiated.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP requires both torch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing/too-old dependency: export dummy stand-ins so importing this
    # package never fails, only using the classes does.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies satisfied: export the real implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
279
1
"""Create the photographic negative of an image.

Run as a script this reads ``image_data/lena.jpg``, shows its negative in a
window, and waits for a key press before closing.
"""


def convert_to_negative(img):
    """Return the negative of ``img``, modifying the array in place.

    Every channel value ``v`` is replaced by ``255 - v``.  Assumes 8-bit
    pixel data in the 0..255 range, as produced by ``imread`` — TODO confirm
    for other bit depths.

    :param img: numpy array of shape ``(height, width[, channels])``
    :return: the same array, negated in place

    Bug fix: the function was defined under a different name than the one
    the ``__main__`` block calls, which raised ``NameError`` at runtime; the
    bogus ``-> int`` return annotation is also gone.  The per-pixel double
    loop is replaced by one equivalent vectorized in-place operation.
    """
    img[:] = 255 - img
    return img


if __name__ == "__main__":
    # GUI/IO helpers are only needed when running as a script, so import
    # them lazily — the pure computation above stays importable without
    # the imaging library installed.
    from cva import destroyAllWindows, imread, imshow, waitKey

    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
279
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#

import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean

import pandas as pd
import torch
from tqdm import tqdm

import transformers


# sentinel for a failed/missing metric value
nan = float("nan")


class Tee:
    """Duplicate everything written to stdout into a log file.

    Usage: ``sys.stdout = Tee(filename)``.  Unknown attributes are proxied
    to the real stdout so the object keeps behaving like a stream.
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes (everything up to the last carriage return)
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Reconstruct this script's own command line as a shell-escaped string.

    Fix: the obfuscated dump collapsed both parameters to the same name
    (a SyntaxError) while the body still referenced ``max_width`` and
    ``full_python_path`` — the original names are restored.

    :param max_width: wrap the command to lines of at most this width
    :param full_python_path: keep the full path to the interpreter instead
        of just its basename
    :return: multi-line string joined with shell line continuations
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    """Normalize ``args.base_cmd`` and return it as an argv list.

    Unwraps line continuations, forces our own ``--output_dir`` and ensures
    ``--overwrite_output_dir`` is present exactly once.
    (Regexes are now raw strings — the originals relied on invalid ``\\s``
    escapes in plain strings.)
    """
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run ``cmd`` once, log its streams, and return the wanted metrics.

    On a non-zero exit the target metric is reported as NaN so the caller
    can mark the run as failed.  (A dead ``if 0:`` random-metrics debug stub
    from the original was removed.)
    """
    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation ``repeat_times`` times and average its metrics.

    Prints a ✓/✘ progress line per attempt and returns the mean metrics
    dict (with the variation string under ``variation_key``), or a NaN
    record when every attempt failed.
    """
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r erases the tqdm leftovers on the current console line
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            # fix: the dump rounded the wrong variable here
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    """Return a report of the software/hardware environment."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Build and print the final report tables (github + console flavors)."""
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        # NOTE(review): `!= nan` is always True for floats, so this selects
        # every row — kept as-is to preserve the original behavior.
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    """Parse arguments, run every variation, and print the report."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    # fix: the dump assigned the Tee to a throwaway local, which disabled the
    # file logging announced just above
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
279
1
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

# SentencePiece fixture (with byte fallback) shared by every test in this module.
lowerCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tests for ``GPTSwaTokenizer`` backed by a small SentencePiece fixture model."""

    # Attribute names restored to the ones TokenizerTesterMixin reads.
    # NOTE(review): the obfuscated source bound all four to the same name; names
    # reconstructed from the tokenizer-test convention — confirm against the mixin.
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing.
        # The original passed an undefined name here; the module-level fixture
        # path is the only plausible vocab-file argument.
        tokenizer = GPTSwaTokenizer(lowerCAmelCase_, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        """Return a (raw text, expected decoded text) pair for the mixin's round-trip tests."""
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """`_convert_token_to_id` and `_convert_id_to_token` must be inverses for a known pair."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(lowerCAmelCase_)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(lowerCAmelCase_)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994],
                [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "token_type_ids": [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "attention_mask": [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
        }
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
279
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--txt2img_unclip''', default='''kakaobrain/karlo-v1-alpha''', type=str, required=False, help='''The pretrained txt2img unclip.''', ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) lowerCAmelCase_ = CLIPImageProcessor() lowerCAmelCase_ = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''') lowerCAmelCase_ = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
279
1
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask

if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds a tiny BigBird config and random inputs for the model tests below.

    Parameter names reconstructed from the attribute reads in the body; the
    obfuscated source gave every parameter the same (invalid) name.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random contents."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): value lost in obfuscation; False matches upstream — confirm
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    # NOTE(review): the second `False` flag's original name was lost in obfuscation;
    # `test_mismatched_shapes` matches the upstream test class — confirm.
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # Attention probabilities are not returned by block-sparse attention, so
        # the common attention test only runs when explicitly enabled.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # Jitted and eager execution must agree on number and shape of outputs.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # Flax BigBird returns attention_probs = None for block-sparse attention,
        # so PT/Flax attention outputs cannot be compared.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
279
from math import factorial

# Factorial of each decimal digit, keyed by the digit character ("0".."9").
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the decimal digits of *number*.

    Raises:
        TypeError: if *number* is not an int.
        ValueError: if *number* is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Count chains with exactly *chain_length* non-repeating terms whose
    starting element is below *number_limit* (Project Euler problem 74).

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of chains keyed by their starting element
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Extend with the cached tail length when the walk hit a known start.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


# Backward-compatible aliases for the module's previous (obfuscated) bindings.
lowerCAmelCase_ = DIGIT_FACTORIAL
lowerCamelCase_ = solution


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
279
1
lowerCAmelCase_ = [ (1_0_0_0, '''M'''), (9_0_0, '''CM'''), (5_0_0, '''D'''), (4_0_0, '''CD'''), (1_0_0, '''C'''), (9_0, '''XC'''), (5_0, '''L'''), (4_0, '''XL'''), (1_0, '''X'''), (9, '''IX'''), (5, '''V'''), (4, '''IV'''), (1, '''I'''), ] def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" snake_case_ : Union[str, Any] = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000} snake_case_ : Optional[Any] = 0 snake_case_ : Optional[int] = 0 while place < len(_UpperCamelCase ): if (place + 1 < len(_UpperCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : List[str] = [] for arabic, roman in ROMAN: ((snake_case_) , (snake_case_)) : List[str] = divmod(_UpperCamelCase , _UpperCamelCase ) result.append(roman * factor ) if number == 0: break return "".join(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
279
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Expose the contents of a compressed file as a filesystem with a single file inside."""

    root_marker = ""
    # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    protocol: str = None
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # Drop the trailing extension to name the single uncompressed file.
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Lazily build the one-entry listing for the single uncompressed file.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Delegates everything to the wrapped file but survives close-attr assignment."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
279
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


lowerCAmelCase_ = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class __lowerCAmelCase(BaseImageProcessor):
    """CLIP-style image processor: resize -> center-crop -> rescale -> normalize.

    NOTE(review): parameter names and keyword defaults were destroyed by obfuscation
    (duplicate parameter names) and are reconstructed from the attribute reads in
    the bodies — confirm against the upstream CLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform, not this method (no recursion).
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop the image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the configured pipeline over one image or a batch; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
279
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase ( _a ): lowerCamelCase_ : Any = '''megatron-bert''' def __init__(self , __magic_name__=2_9056 , __magic_name__=1024 , __magic_name__=24 , __magic_name__=16 , __magic_name__=4096 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=__magic_name__ , **__magic_name__ ) snake_case_ : Union[str, Any] = vocab_size snake_case_ : Dict = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : int = hidden_act snake_case_ : List[str] = intermediate_size snake_case_ : Dict = hidden_dropout_prob snake_case_ : str = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : Any = type_vocab_size snake_case_ : int = initializer_range snake_case_ : int = layer_norm_eps snake_case_ : List[str] = position_embedding_type snake_case_ : Dict = use_cache
279
1
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class __lowerCAmelCase:
    """Serialization round-trip tests shared by feature-extractor test classes.

    Concrete subclasses must set ``feature_extraction_class`` and provide a
    ``feat_extract_dict`` attribute/property with constructor kwargs.
    """

    # Overridden by subclasses with the feature-extractor class under test.
    # NOTE: the original bound this to a mangled name, so ``self.feature_extraction_class``
    # (used by every test below) was never defined; similarly all four tests shared
    # one method name, so only the last definition survived. Distinct ``test_*``
    # names are required for unittest discovery.
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        """A constructed extractor serializes its init kwargs into the JSON string."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """to_json_file / from_json_file round-trip preserves the config dict."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """save_pretrained / from_pretrained round-trip preserves the config dict."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The extractor is constructible with defaults only."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
279
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

# Module-level RNG so repeated calls to ``floats_list`` share one stream.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given (rows, cols) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """Builds constructor kwargs and synthetic speech inputs for the tests below.

    This class is instantiated by ``WhisperFeatureExtractionTest.setUp`` (the
    original referenced it under this name but never defined it — all names had
    been mangled, and ``__init__`` repeated one parameter name, a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive sequence lengths so the batch spans the range.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """Return kwargs for constructing a ``WhisperFeatureExtractor``."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of float speech inputs; optionally equal-length / numpy arrays."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Tests for ``WhisperFeatureExtractor``: serialization, __call__, padding, integration."""

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        """save_pretrained/from_pretrained round-trip preserves config and mel filters."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        """to_json_file/from_json_file round-trip preserves config and mel filters."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        """__call__ handles single, batched, 2-D-array, and over-length inputs consistently."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        """Padding float64 inputs must downcast to float32 for both numpy and torch."""
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        """Load the first ``num_samples`` decoded audio arrays from the dummy LibriSpeech split."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        """Extracted features match a recorded reference slice for a real audio sample."""
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_var_normalization(self):
        """zero_mean_unit_var_norm yields ~0 mean and ~unit variance even for large-scale input."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
279
1
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
    author = "Lin, Chin-Yew and Och, Franz Josef",
    booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
    month = "aug 23{--}aug 27",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://www.aclweb.org/anthology/C04-1072",
    pages = "501--507",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation, the better it is"
– this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality.
Intelligibility or grammatical correctness are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts.
Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations.
For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowerCAmelCase(datasets.Metric):
    """BLEU metric backed by the reference TensorFlow NMT implementation.

    ``datasets.Metric`` requires both ``_info`` and ``_compute``; the original
    gave both methods the same mangled name so only one survived, and the
    docstring constants referenced by the decorator were never defined.
    """

    def _info(self):
        """Describe the metric: citation, expected input features, reference links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Delegate to ``compute_bleu`` and unpack its result tuple into a dict."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
279
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line arguments for the TFRecord sharding script."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a closure that tokenizes the ``text`` column of a batch of examples."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize each tokenized row into a ``tf.train.Example`` byte string."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    """Tokenize, chunk, and write the chosen dataset split into TFRecord shards."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
279
1
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line arguments for the TFRecord sharding script."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a closure that tokenizes the ``text`` column of a batch of examples."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize each tokenized row into a ``tf.train.Example`` byte string."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    """Tokenize, chunk, and write the chosen dataset split into TFRecord shards."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
279
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for video classification.

    The original collapsed all methods onto one shared mangled name (shadowing)
    and repeated one parameter name in ``get_test_pipeline`` (a SyntaxError);
    ``model_mapping`` — read by the common pipeline test machinery — was bound
    to the wrong attribute name.
    """

    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline plus example inputs for the common pipeline test runner."""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        """Each example must yield two (score, label) dicts of the right types."""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        """End-to-end check against a tiny random VideoMAE checkpoint (single and batched)."""
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # No TF implementation to exercise yet.
        pass
279
1
# ViT-MAE model configuration.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

# NOTE(review): this second module-level constant shadows the logger above
# (both were rewritten to the same name); kept as-is to preserve the
# existing (obfuscated) module interface.
lowerCAmelCase_ = {
    '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration for a ViT-MAE (masked-autoencoder Vision Transformer).

    Stores the encoder and decoder hyper-parameters used to instantiate the
    model; the defaults reproduce the facebook/vit-mae-base architecture.

    Fixes vs. previous revision: the ``__init__`` signature repeated the same
    (rewritten) parameter name for every argument — a SyntaxError — and the
    body assigned each value to a local instead of ``self``; the base class
    name had also been rewritten to the undefined ``_a`` although
    ``PretrainedConfig`` was imported.  Parameter names are restored from the
    attribute names that the original assignments read.
    """

    # Registry key used by AutoConfig (attribute name kept for compatibility).
    lowerCamelCase_ : str = '''vit_mae'''

    def __init__(
        self,
        hidden_size=768,                      # encoder embedding dimension
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,               # encoder MLP hidden dim
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,                      # fraction of patches masked out
        norm_pix_loss=False,                  # normalize target pixels per patch
        **kwargs,
    ) -> None:
        """Store all hyper-parameters; extra kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
279
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return *number*'s multiplication table as newline-separated lines.

    Each line has the form ``"{number} * {i} = {number * i}"`` for
    ``i`` in ``1..number_of_terms``; an empty string is returned when
    ``number_of_terms`` is 0 or negative.

    Fixes vs. previous revision: the function had been rewritten to
    ``lowerCamelCase_`` with the same parameter name twice (a SyntaxError)
    while the body read ``number``/``number_of_terms`` and the ``__main__``
    guard called ``multiplication_table`` — names restored consistently.
    """
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
279
1
# Test-support utilities: environment flags, pytest skip markers/decorators,
# offline-network simulation, Arrow memory assertions, and async subprocess
# helpers.
# NOTE(review): identifiers here (`lowerCamelCase_`, `_UpperCamelCase`,
# `snake_case_`, `__lowerCAmelCase`, `_a`) look machine-rewritten; many
# signatures repeat a parameter name (a SyntaxError) and bodies read names
# (`key`, `default`, `test_case`, `mode`, `p`, ...) that the rewritten
# signatures/assignments never bind.  Documented as-is; code left unchanged.
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


# importlib.metadata is stdlib only from Python 3.8 on.
if config.PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> Optional[Any]:
    """Parse a boolean flag from the environment; fall back to *default* when
    the variable is unset; raise ValueError for unparseable values."""
    try:
        snake_case_ : Dict = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        snake_case_ : Optional[int] = default
    else:
        # KEY is set, convert it to True or False.
        try:
            snake_case_ : Union[str, Any] = strtobool(_UpperCamelCase )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value


# Global opt-in/opt-out switches, read once at import time.
lowerCAmelCase_ = parse_flag_from_env('''RUN_SLOW''', default=False)
lowerCAmelCase_ = parse_flag_from_env('''RUN_REMOTE''', default=False)
lowerCAmelCase_ = parse_flag_from_env('''RUN_LOCAL''', default=True)
lowerCAmelCase_ = parse_flag_from_env('''RUN_PACKAGED''', default=True)

# Compression
lowerCAmelCase_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
lowerCAmelCase_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
lowerCAmelCase_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')

# Audio
lowerCAmelCase_ = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)

# Beam
lowerCAmelCase_ = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)

# Dill-cloudpickle compatibility
lowerCAmelCase_ = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)

# Windows
lowerCAmelCase_ = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)


def lowerCamelCase_ ( _UpperCamelCase ) -> List[str]:
    """Skip the decorated test unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        snake_case_ : Optional[int] = unittest.skip('''test requires faiss''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
    """Skip the decorated test unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        snake_case_ : List[Any] = unittest.skip('''test requires regex''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
    """Skip the decorated test unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        snake_case_ : str = unittest.skip('''test requires elasticsearch''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
    """Skip the decorated test unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        snake_case_ : Optional[Any] = unittest.skip('''test requires sqlalchemy''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
    """Skip the decorated test unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        snake_case_ : Optional[int] = unittest.skip('''test requires PyTorch''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
    """Skip the decorated test unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        snake_case_ : Union[str, Any] = unittest.skip('''test requires TensorFlow''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Skip the decorated test unless JAX is available."""
    if not config.JAX_AVAILABLE:
        snake_case_ : Union[str, Any] = unittest.skip('''test requires JAX''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
    """Skip the decorated test unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        snake_case_ : Optional[Any] = unittest.skip('''test requires Pillow''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
    """Skip the decorated test unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('''test requires transformers''' )(_UpperCamelCase )
    else:
        return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
    """Skip the decorated test unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('''test requires tiktoken''' )(_UpperCamelCase )
    else:
        return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
    """Skip the decorated test unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('''test requires spacy''' )(_UpperCamelCase )
    else:
        return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
    """Decorator factory: skip the test unless the named spacy model loads."""
    def _require_spacy_model(_UpperCamelCase ):
        try:
            import spacy  # noqa F401

            spacy.load(_UpperCamelCase )
        except ImportError:
            return unittest.skip('''test requires spacy''' )(_UpperCamelCase )
        except OSError:
            return unittest.skip('''test requires spacy model \'{}\''''.format(_UpperCamelCase ) )(_UpperCamelCase )
        else:
            return test_case

    return _require_spacy_model


def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
    """Skip the decorated test unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires pyspark''' )(_UpperCamelCase )
    else:
        return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Skip the decorated test unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires joblibspark''' )(_UpperCamelCase )
    else:
        return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
    """Skip the decorated test unless the RUN_SLOW flag is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        snake_case_ : Any = unittest.skip('''test is slow''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
    """Skip the decorated test unless the RUN_LOCAL flag is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        snake_case_ : List[str] = unittest.skip('''test is local''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
    """Skip the decorated test unless the RUN_PACKAGED flag is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        snake_case_ : List[Any] = unittest.skip('''test is packaged''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( _UpperCamelCase ) -> str:
    """Skip the decorated test unless the RUN_REMOTE flag is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        snake_case_ : Any = unittest.skip('''test requires remote''' )(_UpperCamelCase )
    return test_case


def lowerCamelCase_ ( *_UpperCamelCase ) -> List[Any]:
    """Class decorator: apply each given decorator to every test_* method."""
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(_UpperCamelCase ) and name.startswith('''test''' ):
                for decorator in decorators:
                    snake_case_ : Dict = decorator(_UpperCamelCase )
                setattr(cls , _UpperCamelCase , _UpperCamelCase )
        return cls

    return decorate


class __lowerCAmelCase ( _a ):
    # Raised when an offline-mode request would hang because no timeout is set.
    pass


class __lowerCAmelCase ( _a ):
    # Enum of the ways an offline environment can be simulated
    # (connection fails / connection times out / HF_DATASETS_OFFLINE=1).
    lowerCamelCase_ : Optional[int] = 0
    lowerCamelCase_ : Optional[int] = 1
    lowerCamelCase_ : Optional[Any] = 2


@contextmanager
def lowerCamelCase_ ( _UpperCamelCase=OfflineSimulationMode.CONNECTION_FAILS , _UpperCamelCase=1E-16 ) -> Dict:
    """Context manager simulating a no-network environment in one of three
    modes: requests raising ConnectionError, requests timing out against an
    unroutable address, or datasets' HF_DATASETS_OFFLINE config flag."""
    snake_case_ : List[str] = requests.Session().request

    def timeout_request(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ):
        # Change the url to an invalid url so that the connection hangs
        snake_case_ : Optional[int] = '''https://10.255.255.1'''
        if kwargs.get('''timeout''' ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        snake_case_ : List[Any] = timeout
        try:
            return online_request(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            snake_case_ : Any = url
            snake_case_ : List[Any] = e.args[0]
            snake_case_ : Optional[Any] = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
            snake_case_ : str = (max_retry_error,)
            raise

    def raise_connection_error(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ):
        raise requests.ConnectionError('''Offline mode is enabled.''' , request=_UpperCamelCase )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''' , _UpperCamelCase ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''' , _UpperCamelCase ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCamelCase ):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )


@contextmanager
def lowerCamelCase_ ( *_UpperCamelCase , **_UpperCamelCase ) -> str:
    """Run the with-block inside a fresh temporary working directory, then
    restore the original working directory."""
    snake_case_ : Union[str, Any] = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*_UpperCamelCase , **_UpperCamelCase ) as tmp_dir:
        try:
            os.chdir(_UpperCamelCase )
            yield
        finally:
            os.chdir(_UpperCamelCase )


@contextmanager
def lowerCamelCase_ ( ) -> Optional[Any]:
    """Assert that Arrow allocations grow inside the with-block."""
    import gc

    gc.collect()
    snake_case_ : Union[str, Any] = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def lowerCamelCase_ ( ) -> Any:
    """Assert that Arrow allocations do not grow inside the with-block."""
    import gc

    gc.collect()
    snake_case_ : Optional[int] = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Dict:
    """Compare two RNG states by drawing the same sample from deep copies."""
    return deepcopy(_UpperCamelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(_UpperCamelCase ).integers(0 , 100 , 10 ).tolist()


def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
    """Decorator: xfail the test when the wrapped call raises HTTP 500/502."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ):
        try:
            return func(*_UpperCamelCase , **_UpperCamelCase )
        except HTTPError as err:
            if str(_UpperCamelCase ).startswith('''500''' ) or str(_UpperCamelCase ).startswith('''502''' ):
                pytest.xfail(str(_UpperCamelCase ) )
            raise err

    return decorator.decorator(_wrapper , _UpperCamelCase )


class __lowerCAmelCase :
    # Value object holding a finished subprocess's returncode/stdout/stderr.
    def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
        '''Store the subprocess result fields.'''
        snake_case_ : Optional[int] = returncode
        snake_case_ : Union[str, Any] = stdout
        snake_case_ : List[str] = stderr


async def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]:
    """Forward every line read from an async stream to *callback* until EOF."""
    while True:
        snake_case_ : Optional[int] = await stream.readline()
        if line:
            callback(_UpperCamelCase )
        else:
            break


async def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False ) -> _RunOutput:
    """Run *cmd* asynchronously, teeing stdout/stderr to the console while
    also capturing both streams for the returned _RunOutput."""
    if echo:
        print('''\nRunning: ''' , ''' '''.join(_UpperCamelCase ) )

    snake_case_ : Optional[Any] = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    snake_case_ : Union[str, Any] = []
    snake_case_ : Optional[Any] = []

    def tee(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="" ):
        snake_case_ : str = line.decode('''utf-8''' ).rstrip()
        sink.append(_UpperCamelCase )
        if not quiet:
            print(_UpperCamelCase , _UpperCamelCase , file=_UpperCamelCase )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label='''stdout:''' ) ),
            _read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label='''stderr:''' ) ),
        ] , timeout=_UpperCamelCase , )
    return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase )


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=180 , _UpperCamelCase=False , _UpperCamelCase=True ) -> _RunOutput:
    """Blocking wrapper around _stream_subprocess; raise RuntimeError on a
    non-zero exit code or when the subprocess produced no output at all."""
    snake_case_ : List[Any] = asyncio.get_event_loop()
    snake_case_ : Tuple = loop.run_until_complete(
        _stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) )

    snake_case_ : str = ''' '''.join(_UpperCamelCase )
    if result.returncode > 0:
        snake_case_ : int = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )

    return result


def lowerCamelCase_ ( ) -> Union[str, Any]:
    """Return this pytest-xdist worker's numeric id (0 outside xdist)."""
    snake_case_ : str = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
    snake_case_ : Any = re.sub(R'''^gw''' , '''''' , _UpperCamelCase , 0 , re.M )
    return int(_UpperCamelCase )


def lowerCamelCase_ ( ) -> List[str]:
    """Derive a unique torch.distributed port from the xdist worker id."""
    snake_case_ : Optional[int] = 29_500
    snake_case_ : Any = pytest_xdist_worker_id()
    return port + uniq_delta
279
# Conversion script: OpenAI Whisper checkpoints -> Hugging Face
# WhisperForConditionalGeneration.
# NOTE(review): identifiers (`lowerCamelCase_`, `_UpperCamelCase`,
# `snake_case_`) look machine-rewritten; several signatures repeat a parameter
# name (a SyntaxError), `hashlib.shaaaa` appears where `sha256` is read (the
# strings mention SHA256), and bodies read names the rewritten assignments
# never bind.  Documented as-is; code left unchanged.
import argparse
import hashlib
import os
import urllib
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


# Official OpenAI checkpoint URLs, keyed by model size; the second-to-last
# URL path segment is the checkpoint's SHA256 (used by the download check).
lowerCAmelCase_ = {
    '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
    '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
    '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
    '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
    '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
    '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
    '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
    '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
    '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
    '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}


def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
    """Drop top-level container keys that should not be converted."""
    snake_case_ : List[str] = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(_UpperCamelCase , _UpperCamelCase )


# Whisper -> Transformers state-dict key substring substitutions.
lowerCAmelCase_ = {
    '''blocks''': '''layers''',
    '''mlp.0''': '''fc1''',
    '''mlp.2''': '''fc2''',
    '''mlp_ln''': '''final_layer_norm''',
    '''.attn.query''': '''.self_attn.q_proj''',
    '''.attn.key''': '''.self_attn.k_proj''',
    '''.attn.value''': '''.self_attn.v_proj''',
    '''.attn_ln''': '''.self_attn_layer_norm''',
    '''.attn.out''': '''.self_attn.out_proj''',
    '''.cross_attn.query''': '''.encoder_attn.q_proj''',
    '''.cross_attn.key''': '''.encoder_attn.k_proj''',
    '''.cross_attn.value''': '''.encoder_attn.v_proj''',
    '''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
    '''.cross_attn.out''': '''.encoder_attn.out_proj''',
    '''decoder.ln.''': '''decoder.layer_norm.''',
    '''encoder.ln.''': '''encoder.layer_norm.''',
    '''token_embedding''': '''embed_tokens''',
    '''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
    '''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
    '''ln_post''': '''layer_norm''',
}


def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Rename every state-dict key in place using the mapping above."""
    snake_case_ : str = list(s_dict.keys() )
    for key in keys:
        snake_case_ : Optional[int] = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                snake_case_ : List[str] = new_key.replace(_UpperCamelCase , _UpperCamelCase )

        print(f'''{key} -> {new_key}''' )

        snake_case_ : Tuple = s_dict.pop(_UpperCamelCase )
    return s_dict


def lowerCamelCase_ ( _UpperCamelCase ) -> int:
    """Build an nn.Linear whose weight shares an embedding's data
    (weight-tied LM head)."""
    snake_case_ , snake_case_ : Dict = emb.weight.shape
    snake_case_ : Tuple = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
    snake_case_ : Any = emb.weight.data
    return lin_layer


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> bytes:
    """Download a checkpoint into *root*, verifying the checksum embedded in
    the URL path; reuse a cached file when the checksum already matches."""
    os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
    snake_case_ : List[Any] = os.path.basename(_UpperCamelCase )
    snake_case_ : Any = url.split('''/''' )[-2]
    snake_case_ : str = os.path.join(_UpperCamelCase , _UpperCamelCase )
    if os.path.exists(_UpperCamelCase ) and not os.path.isfile(_UpperCamelCase ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )

    if os.path.isfile(_UpperCamelCase ):
        snake_case_ : Union[str, Any] = open(_UpperCamelCase , '''rb''' ).read()
        if hashlib.shaaaa(_UpperCamelCase ).hexdigest() == expected_shaaaa:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )

    with urllib.request.urlopen(_UpperCamelCase ) as source, open(_UpperCamelCase , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=_UpperCamelCase , unit_divisor=1_024 ) as loop:
            while True:
                snake_case_ : Dict = source.read(8_192 )
                if not buffer:
                    break

                output.write(_UpperCamelCase )
                loop.update(len(_UpperCamelCase ) )

    snake_case_ : Any = open(_UpperCamelCase , '''rb''' ).read()
    if hashlib.shaaaa(_UpperCamelCase ).hexdigest() != expected_shaaaa:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' )

    return model_bytes


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
    """Convert an OpenAI Whisper checkpoint (by name or local .pt path) to a
    Transformers model and save_pretrained it to the output folder."""
    if ".pt" not in checkpoint_path:
        snake_case_ : str = _download(_MODELS[checkpoint_path] )
    else:
        snake_case_ : Union[str, Any] = torch.load(_UpperCamelCase , map_location='''cpu''' )
    snake_case_ : int = original_checkpoint['''dims''']
    snake_case_ : List[str] = original_checkpoint['''model_state_dict''']
    snake_case_ : str = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(_UpperCamelCase )
    rename_keys(_UpperCamelCase )
    snake_case_ : Optional[int] = True
    snake_case_ : int = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]

    snake_case_ : List[str] = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=_UpperCamelCase , decoder_ffn_dim=_UpperCamelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )

    snake_case_ : Union[str, Any] = WhisperForConditionalGeneration(_UpperCamelCase )
    snake_case_ , snake_case_ : List[Any] = model.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
    if len(_UpperCamelCase ) > 0 and not set(_UpperCamelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''' )

    if tie_embeds:
        snake_case_ : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        snake_case_ : Any = proj_out_weights

    model.save_pretrained(_UpperCamelCase )


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    lowerCAmelCase_ = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
279
1
# Slow integration test for DiT (Document Image Transformer) fine-tuned on
# RVL-CDIP document classification (16 classes).
# NOTE(review): identifiers (`snake_case_`, `__magic_name__`) look
# machine-rewritten; several reads reference names the rewritten assignments
# never bind.  Documented as-is; code left unchanged.
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        '''Run the dit-base-finetuned-rvlcdip checkpoint on one demo document
        image and pin the logits shape (1, 16) plus a slice of the values.'''
        snake_case_ : Any = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        snake_case_ : Tuple = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(__magic_name__ )

        from datasets import load_dataset

        snake_case_ : Optional[Any] = load_dataset('''nielsr/rvlcdip-demo''' )

        snake_case_ : Any = dataset['''train'''][0]['''image'''].convert('''RGB''' )

        snake_case_ : Tuple = image_processor(__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )

        # forward pass
        with torch.no_grad():
            snake_case_ : Union[str, Any] = model(**__magic_name__ )
            snake_case_ : int = outputs.logits

        snake_case_ : Dict = torch.Size((1, 16) )
        self.assertEqual(logits.shape , __magic_name__ )

        snake_case_ : int = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] ,
            device=__magic_name__ ,
            dtype=torch.float ,
        )
        self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
279
# Pytest suite for the project-local PokerHand class (Project Euler 54 data).
# NOTE(review): identifiers (`lowerCamelCase_`, `lowerCAmelCase_`,
# `_UpperCamelCase`, `snake_case_`) look machine-rewritten; several signatures
# repeat a parameter name (a SyntaxError), the parametrize decorators all
# reference `_UpperCamelCase` instead of the fixture tuples, and bodies read
# names the rewritten assignments never bind.  Documented as-is; code
# left unchanged.
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sola import PokerHand


# All 5-card hands, sorted from weakest to strongest (index order is relied
# on by the random-comparison generator below).
lowerCAmelCase_ = (
    '''4S 3H 2C 7S 5H''',
    '''9D 8H 2C 6S 7H''',
    '''2D 6D 9D TH 7D''',
    '''TC 8C 2S JH 6C''',
    '''JH 8S TH AH QH''',
    '''TS KS 5S 9S AC''',
    '''KD 6S 9D TH AD''',
    '''KS 8D 4D 9S 4S''',  # pair
    '''8C 4S KH JS 4D''',  # pair
    '''QH 8H KD JH 8S''',  # pair
    '''KC 4H KS 2H 8D''',  # pair
    '''KD 4S KC 3H 8S''',  # pair
    '''AH 8S AS KC JH''',  # pair
    '''3H 4C 4H 3S 2H''',  # 2 pairs
    '''5S 5D 2C KH KH''',  # 2 pairs
    '''3C KH 5D 5S KH''',  # 2 pairs
    '''AS 3C KH AD KH''',  # 2 pairs
    '''7C 7S 3S 7H 5S''',  # 3 of a kind
    '''7C 7S KH 2H 7H''',  # 3 of a kind
    '''AC KH QH AH AS''',  # 3 of a kind
    '''2H 4D 3C AS 5S''',  # straight (low ace)
    '''3C 5C 4C 2C 6H''',  # straight
    '''6S 8S 7S 5H 9H''',  # straight
    '''JS QS 9H TS KH''',  # straight
    '''QC KH TS JS AH''',  # straight (high ace)
    '''8C 9C 5C 3C TC''',  # flush
    '''3S 8S 9S 5S KS''',  # flush
    '''4C 5C 9C 8C KC''',  # flush
    '''JH 8H AH KH QH''',  # flush
    '''3D 2H 3H 2C 2D''',  # full house
    '''2H 2C 3S 3H 3D''',  # full house
    '''KH KC 3S 3H 3D''',  # full house
    '''JC 6H JS JD JH''',  # 4 of a kind
    '''JC 7H JS JD JH''',  # 4 of a kind
    '''JC KH JS JD JH''',  # 4 of a kind
    '''2S AS 4S 5S 3S''',  # straight flush (low ace)
    '''2D 6D 3D 4D 5D''',  # straight flush
    '''5C 6C 3C 7C 4C''',  # straight flush
    '''JH 9H TH KH QH''',  # straight flush
    '''JH AH TH KH QH''',  # royal flush (high ace straight flush)
)

# (hand, other, expected comparison result) fixtures.
lowerCAmelCase_ = (
    ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
    ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
    ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
    ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
    ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
    ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
    ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
    ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
    ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
    ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
    ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
    ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
    ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
    ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
    ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
    ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
    ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
    ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
    ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
    ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
    ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
    ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
    ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
    ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
    ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
    ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
    ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
    ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
    ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
    ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
    ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
    ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
    ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)

# (hand, is_flush) fixtures.
lowerCAmelCase_ = (
    ('''2H 3H 4H 5H 6H''', True),
    ('''AS AH 2H AD AC''', False),
    ('''2H 3H 5H 6H 7H''', True),
    ('''KS AS TS QS JS''', True),
    ('''8H 9H QS JS TH''', False),
    ('''AS 3S 4S 8S 2S''', True),
)

# (hand, is_straight) fixtures.
lowerCAmelCase_ = (
    ('''2H 3H 4H 5H 6H''', True),
    ('''AS AH 2H AD AC''', False),
    ('''2H 3H 5H 6H 7H''', False),
    ('''KS AS TS QS JS''', True),
    ('''8H 9H QS JS TH''', True),
)

# (hand, is_five_high_straight, expected card values) fixtures.
lowerCAmelCase_ = (
    ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
    ('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
    ('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
    ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)

# (hand, same-kind count) fixtures.
lowerCAmelCase_ = (
    ('''JH AH TH KH QH''', 0),
    ('''JH 9H TH KH QH''', 0),
    ('''JC KH JS JD JH''', 7),
    ('''KH KC 3S 3H 3D''', 6),
    ('''8C 9C 5C 3C TC''', 0),
    ('''JS QS 9H TS KH''', 0),
    ('''7C 7S KH 2H 7H''', 3),
    ('''3C KH 5D 5S KH''', 2),
    ('''QH 8H KD JH 8S''', 1),
    ('''2D 6D 9D TH 7D''', 0),
)

# (hand, hand-type rank) fixtures.
lowerCAmelCase_ = (
    ('''JH AH TH KH QH''', 2_3),
    ('''JH 9H TH KH QH''', 2_2),
    ('''JC KH JS JD JH''', 2_1),
    ('''KH KC 3S 3H 3D''', 2_0),
    ('''8C 9C 5C 3C TC''', 1_9),
    ('''JS QS 9H TS KH''', 1_8),
    ('''7C 7S KH 2H 7H''', 1_7),
    ('''3C KH 5D 5S KH''', 1_6),
    ('''QH 8H KD JH 8S''', 1_5),
    ('''2D 6D 9D TH 7D''', 1_4),
)


def lowerCamelCase_ ( ) -> Dict:
    """Pick two random SORTED_HANDS entries and derive the expected
    Loss/Tie/Win outcome from their index order."""
    snake_case_ , snake_case_ : Any = randrange(len(_UpperCamelCase ) ), randrange(len(_UpperCamelCase ) )
    snake_case_ : Any = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    snake_case_ , snake_case_ : Tuple = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def lowerCamelCase_ ( _UpperCamelCase = 100 ) -> str:
    """Yield random (hand, other, expected) triples for parametrization."""
    return (generate_random_hand() for _ in range(_UpperCamelCase ))


@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
    """Check PokerHand._is_flush against the flush fixtures."""
    assert PokerHand(_UpperCamelCase )._is_flush() == expected


@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
    """Check PokerHand._is_straight against the straight fixtures."""
    assert PokerHand(_UpperCamelCase )._is_straight() == expected


@pytest.mark.parametrize('''hand, expected, card_values''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
    """Check the low-ace (five-high) straight detection and card values."""
    snake_case_ : str = PokerHand(_UpperCamelCase )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
    """Check PokerHand._is_same_kind against the kind fixtures."""
    assert PokerHand(_UpperCamelCase )._is_same_kind() == expected


@pytest.mark.parametrize('''hand, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
    """Check PokerHand._hand_type against the hand-type fixtures."""
    assert PokerHand(_UpperCamelCase )._hand_type == expected


@pytest.mark.parametrize('''hand, other, expected''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
    """Check compare_with on the curated comparison fixtures."""
    assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected


@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
    """Check compare_with on randomly generated hand pairs."""
    assert PokerHand(_UpperCamelCase ).compare_with(PokerHand(_UpperCamelCase ) ) == expected


def lowerCamelCase_ ( ) -> str:
    """Shuffling then sorting PokerHands must restore SORTED_HANDS order."""
    snake_case_ : Dict = [PokerHand(_UpperCamelCase ) for hand in SORTED_HANDS]
    snake_case_ : str = poker_hands.copy()
    shuffle(_UpperCamelCase )
    snake_case_ : List[str] = chain(sorted(_UpperCamelCase ) )
    for index, hand in enumerate(_UpperCamelCase ):
        assert hand == poker_hands[index]


def lowerCamelCase_ ( ) -> Dict:
    """Reverse-sorting must rank the six-high straight above the low-ace one."""
    snake_case_ : Union[str, Any] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=_UpperCamelCase )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def lowerCamelCase_ ( ) -> str:
    """Repeated five-high-straight queries must be stable (no state decay)."""
    snake_case_ : Dict = PokerHand('''2C 4S AS 3D 5C''' )
    snake_case_ : str = True
    snake_case_ : Tuple = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def lowerCamelCase_ ( ) -> List[str]:
    """Project Euler 54: count wins for player 1 over poker_hands.txt (376)."""
    snake_case_ : List[str] = 0
    snake_case_ : Union[str, Any] = os.path.abspath(os.path.dirname(_UpperCamelCase ) )
    snake_case_ : Dict = os.path.join(_UpperCamelCase , '''poker_hands.txt''' )
    with open(_UpperCamelCase ) as file_hand:
        for line in file_hand:
            snake_case_ : Dict = line[:14].strip()
            snake_case_ : List[str] = line[15:].strip()
            snake_case_ , snake_case_ : str = PokerHand(_UpperCamelCase ), PokerHand(_UpperCamelCase )
            snake_case_ : int = player.compare_with(_UpperCamelCase )
            if output == "Win":
                answer += 1
    assert answer == 376
279
1
from __future__ import annotations from decimal import Decimal from numpy import array def lowerCamelCase_ ( _UpperCamelCase ) -> list[list[float]]: """simple docstring""" snake_case_ : List[Any] = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_UpperCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix snake_case_ : Tuple = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creates a copy of the matrix with swapped positions of the elements snake_case_ : Dict = [[0.0, 0.0], [0.0, 0.0]] snake_case_ , snake_case_ : Optional[Any] = matrix[1][1], matrix[0][0] snake_case_ , snake_case_ : List[str] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_UpperCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_UpperCamelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule snake_case_ : Any = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creating cofactor matrix snake_case_ : Tuple = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] snake_case_ : List[str] = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) snake_case_ : str = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) 
) snake_case_ : List[Any] = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) snake_case_ : Dict = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) snake_case_ : List[str] = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) snake_case_ : int = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) snake_case_ : List[str] = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) snake_case_ : List[Any] = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) snake_case_ : Dict = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) snake_case_ : str = array(_UpperCamelCase ) for i in range(3 ): for j in range(3 ): snake_case_ : str = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix snake_case_ : List[str] = array(_UpperCamelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_UpperCamelCase ) # Calculate the inverse of the matrix return [[float(d(_UpperCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
279
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available lowerCAmelCase_ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : lowerCamelCase_ : str lowerCamelCase_ : List[str] lowerCamelCase_ : Optional[List[str]] @dataclass class __lowerCAmelCase : lowerCamelCase_ : List[int] lowerCamelCase_ : List[int] lowerCamelCase_ : Optional[List[int]] = None lowerCamelCase_ : Optional[List[int]] = None class __lowerCAmelCase ( _a ): lowerCamelCase_ : str = '''train''' lowerCamelCase_ : List[str] = '''dev''' lowerCamelCase_ : List[Any] = '''test''' class __lowerCAmelCase : @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ ) -> List[InputExample]: '''simple docstring''' raise NotImplementedError @staticmethod def lowerCamelCase (__magic_name__ ) -> List[str]: '''simple docstring''' raise NotImplementedError @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__=1 , __magic_name__="[SEP]" , __magic_name__=False , __magic_name__=False , __magic_name__=0 , __magic_name__=0 , __magic_name__=-100 , __magic_name__=0 , __magic_name__=True , ) -> List[InputFeatures]: '''simple docstring''' snake_case_ : Optional[int] = {label: i for i, label in enumerate(__magic_name__ )} snake_case_ : Dict = [] for ex_index, example in enumerate(__magic_name__ ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d of %d''' , __magic_name__ , len(__magic_name__ ) ) snake_case_ : List[str] = [] snake_case_ : List[str] = [] for word, label in zip(example.words , example.labels ): snake_case_ : Optional[Any] = tokenizer.tokenize(__magic_name__ ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. 
if len(__magic_name__ ) > 0: tokens.extend(__magic_name__ ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__magic_name__ ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. snake_case_ : Union[str, Any] = tokenizer.num_special_tokens_to_add() if len(__magic_name__ ) > max_seq_length - special_tokens_count: snake_case_ : str = tokens[: (max_seq_length - special_tokens_count)] snake_case_ : Any = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] snake_case_ : Union[str, Any] = [sequence_a_segment_id] * len(__magic_name__ ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: snake_case_ : Union[str, Any] = [cls_token] + tokens snake_case_ : List[Any] = [pad_token_label_id] + label_ids snake_case_ : Optional[Any] = [cls_token_segment_id] + segment_ids snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. snake_case_ : int = [1 if mask_padding_with_zero else 0] * len(__magic_name__ ) # Zero-pad up to the sequence length. snake_case_ : Optional[int] = max_seq_length - len(__magic_name__ ) if pad_on_left: snake_case_ : Optional[Any] = ([pad_token] * padding_length) + input_ids snake_case_ : Optional[int] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask snake_case_ : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids snake_case_ : Dict = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length assert len(__magic_name__ ) == max_seq_length if ex_index < 5: logger.info('''*** Example ***''' ) logger.info('''guid: %s''' , example.guid ) logger.info('''tokens: %s''' , ''' '''.join([str(__magic_name__ ) for x in tokens] ) ) logger.info('''input_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_ids] ) ) logger.info('''input_mask: %s''' , ''' 
'''.join([str(__magic_name__ ) for x in input_mask] ) ) logger.info('''segment_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in segment_ids] ) ) logger.info('''label_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: snake_case_ : int = None features.append( InputFeatures( input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , label_ids=__magic_name__ ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __lowerCAmelCase ( _a ): lowerCamelCase_ : List[InputFeatures] lowerCamelCase_ : int = nn.CrossEntropyLoss().ignore_index def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = os.path.join( __magic_name__ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__magic_name__ ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
snake_case_ : Dict = cached_features_file + '''.lock''' with FileLock(__magic_name__ ): if os.path.exists(__magic_name__ ) and not overwrite_cache: logger.info(F'''Loading features from cached file {cached_features_file}''' ) snake_case_ : Dict = torch.load(__magic_name__ ) else: logger.info(F'''Creating features from dataset file at {data_dir}''' ) snake_case_ : Any = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ ) # TODO clean up all this to leverage built-in features of tokenizers snake_case_ : int = token_classification_task.convert_examples_to_features( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F'''Saving features into cached file {cached_features_file}''' ) torch.save(self.features , __magic_name__ ) def __len__(self ) -> Optional[Any]: '''simple docstring''' return len(self.features ) def __getitem__(self , __magic_name__ ) -> InputFeatures: '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class __lowerCAmelCase : lowerCamelCase_ : List[InputFeatures] lowerCamelCase_ : int = -100 def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = Split.train , ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = token_classification_task.read_examples_from_file(__magic_name__ , __magic_name__ ) # TODO clean up all this to leverage built-in features of tokenizers snake_case_ : int = 
token_classification_task.convert_examples_to_features( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: snake_case_ : Optional[Any] = tf.data.Dataset.from_generator( __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , ( {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: snake_case_ : int = tf.data.Dataset.from_generator( __magic_name__ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , ( { '''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] ), '''token_type_ids''': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__(self ) -> str: '''simple docstring''' return len(self.features ) def __getitem__(self , __magic_name__ ) -> InputFeatures: '''simple docstring''' return self.features[i]
279
1
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase_ ( _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : Optional[int] = SwinConfig.from_pretrained( '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) snake_case_ : Tuple = MaskFormerConfig(backbone_config=_UpperCamelCase ) snake_case_ : Dict = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok snake_case_ : Optional[Any] = 847 snake_case_ : Tuple = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok snake_case_ : Optional[int] = 150 snake_case_ : List[str] = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok snake_case_ : Optional[Any] = 171 snake_case_ : Dict = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO snake_case_ : Tuple = 133 snake_case_ : Union[str, Any] = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok snake_case_ : str = 19 snake_case_ : Dict = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok snake_case_ : List[Any] = 65 snake_case_ : Optional[Any] = '''mapillary-vistas-id2label.json''' snake_case_ : List[Any] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) snake_case_ : Tuple = {int(_UpperCamelCase ): v for k, v in idalabel.items()} return config def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : str = [] # stem # fmt: off 
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) 
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') ) for source_index, 
target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') ) rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) 
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', 
f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') ) # heads on top rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') ) rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') ) rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') ) for i in range(3 ): rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case_ : Tuple = dct.pop(_UpperCamelCase ) snake_case_ : str = val def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" snake_case_ : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): snake_case_ : int = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) snake_case_ : Dict = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) snake_case_ : Union[str, Any] = 
state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case_ : Optional[int] = in_proj_weight[:dim, :] snake_case_ : str = in_proj_bias[: dim] snake_case_ : int = in_proj_weight[ dim : dim * 2, : ] snake_case_ : Any = in_proj_bias[ dim : dim * 2 ] snake_case_ : List[str] = in_proj_weight[ -dim :, : ] snake_case_ : List[Any] = in_proj_bias[-dim :] # fmt: on def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Any = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) snake_case_ : Dict = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) snake_case_ : Optional[Any] = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case_ : Union[str, Any] = in_proj_weight[: hidden_size, :] snake_case_ : Dict = in_proj_bias[:config.hidden_size] snake_case_ : str = in_proj_weight[hidden_size : hidden_size * 2, :] snake_case_ : List[Any] = in_proj_bias[hidden_size : hidden_size * 2] snake_case_ : int = in_proj_weight[-hidden_size :, :] snake_case_ : List[Any] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) snake_case_ : List[str] = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) snake_case_ : List[str] = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case_ : str = in_proj_weight[: hidden_size, :] 
snake_case_ : List[str] = in_proj_bias[:config.hidden_size] snake_case_ : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] snake_case_ : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2] snake_case_ : Any = in_proj_weight[-hidden_size :, :] snake_case_ : Union[str, Any] = in_proj_bias[-hidden_size :] # fmt: on def lowerCamelCase_ ( ) -> torch.Tensor: """simple docstring""" snake_case_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ : Tuple = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False ) -> int: """simple docstring""" snake_case_ : Tuple = get_maskformer_config(_UpperCamelCase ) # load original state_dict with open(_UpperCamelCase , '''rb''' ) as f: snake_case_ : Optional[Any] = pickle.load(_UpperCamelCase ) snake_case_ : Optional[int] = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys snake_case_ : str = create_rename_keys(_UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) read_in_swin_q_k_v(_UpperCamelCase , config.backbone_config ) read_in_decoder_q_k_v(_UpperCamelCase , _UpperCamelCase ) # update to torch tensors for key, value in state_dict.items(): snake_case_ : str = torch.from_numpy(_UpperCamelCase ) # load 🤗 model snake_case_ : Tuple = MaskFormerForInstanceSegmentation(_UpperCamelCase ) model.eval() for name, param in model.named_parameters(): print(_UpperCamelCase , param.shape ) snake_case_ , snake_case_ : Tuple = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(_UpperCamelCase ) == 0, f'''Unexpected keys: {unexpected_keys}''' # verify results snake_case_ : 
Optional[Any] = prepare_img() if "vistas" in model_name: snake_case_ : List[str] = 65 elif "cityscapes" in model_name: snake_case_ : Dict = 65_535 else: snake_case_ : Optional[int] = 255 snake_case_ : Dict = True if '''ade''' in model_name else False snake_case_ : Optional[int] = MaskFormerImageProcessor(ignore_index=_UpperCamelCase , reduce_labels=_UpperCamelCase ) snake_case_ : str = image_processor(_UpperCamelCase , return_tensors='''pt''' ) snake_case_ : Any = model(**_UpperCamelCase ) print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": snake_case_ : Optional[int] = torch.tensor( [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) image_processor.save_pretrained(_UpperCamelCase ) if push_to_hub: print('''Pushing model and image processor to the hub...''' ) model.push_to_hub(f'''nielsr/{model_name}''' ) image_processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''maskformer-swin-tiny-ade''', type=str, help=('''Name of the MaskFormer model you\'d like to convert''',), ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''', type=str, help='''Path to the original state dict (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether 
or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
279
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[int] = SpeechTaTokenizer lowerCamelCase_ : int = False lowerCamelCase_ : Dict = True def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case_ : Tuple = SpeechTaTokenizer(__magic_name__ ) snake_case_ : Any = AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ ) snake_case_ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Dict = '''this is a test''' snake_case_ : int = '''this is a test''' return input_text, output_text def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> List[Any]: '''simple docstring''' snake_case_ , snake_case_ : int = self.get_input_output_texts(__magic_name__ ) snake_case_ : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) snake_case_ : Any = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) return text, ids def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[str] = '''<pad>''' snake_case_ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) 
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__magic_name__ ) , 81 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : int = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) ) snake_case_ : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': 
'''<<<<<|||>|>>>>|>'''} snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Dict = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) ) snake_case_ : Tuple = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> List[str]: '''simple docstring''' pass def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.get_tokenizer() snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, 
'''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ ) # fmt: off self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off snake_case_ : List[Any] = { '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 
14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
279
1
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image
    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    # Tests for AlignProcessor, which pairs a BERT tokenizer with an
    # EfficientNet image processor.
    # NOTE(review): local names look machine-mangled — every assignment targets
    # `snake_case_`, yet later statements read names such as `self.vocab_file`,
    # `self.image_processor_file`, `processor_slow`, `image_inputs`, and all
    # methods share the name `lowerCamelCase` while bodies call
    # `self.get_tokenizer()` / `self.get_rust_tokenizer()` /
    # `self.get_image_processor()` / `self.prepare_image_inputs()`.
    # Verify against the original test file before relying on behavior.

    def lowerCamelCase (self ) -> List[str]:
        '''Create a temp dir holding a toy BERT vocab file and an EfficientNet image-processor config.'''
        snake_case_ : Optional[int] = tempfile.mkdtemp()
        # Minimal WordPiece vocabulary (special tokens + a few subwords).
        snake_case_ : Union[str, Any] = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # Image-processor settings serialized to JSON next to the vocab.
        snake_case_ : int = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
            '''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        snake_case_ : Dict = os.path.join(self.tmpdirname , __magic_name__ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__magic_name__ , __magic_name__ )

    def lowerCamelCase (self , **__magic_name__ ) -> Dict:
        '''Load the slow BertTokenizer saved in the temp dir.'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )

    def lowerCamelCase (self , **__magic_name__ ) -> Optional[int]:
        '''Load the fast BertTokenizerFast saved in the temp dir.'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )

    def lowerCamelCase (self , **__magic_name__ ) -> str:
        '''Load the EfficientNetImageProcessor saved in the temp dir.'''
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )

    def lowerCamelCase (self ) -> int:
        '''Remove the temp dir created above.'''
        shutil.rmtree(self.tmpdirname )

    def lowerCamelCase (self ) -> Dict:
        '''Return a one-element list of random PIL images built from uint8 arrays.'''
        # NOTE(review): `np.uinta` looks like a mangling of `np.uint8` — confirm.
        snake_case_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        snake_case_ : Optional[int] = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def lowerCamelCase (self ) -> List[Any]:
        '''Saving then reloading the processor round-trips both tokenizer flavors and the image processor.'''
        snake_case_ : Any = self.get_tokenizer()
        snake_case_ : str = self.get_rust_tokenizer()
        snake_case_ : int = self.get_image_processor()
        snake_case_ : List[Any] = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
        processor_slow.save_pretrained(self.tmpdirname )
        snake_case_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
        snake_case_ : int = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
        processor_fast.save_pretrained(self.tmpdirname )
        snake_case_ : Tuple = AlignProcessor.from_pretrained(self.tmpdirname )
        # Vocabularies survive the save/load round-trip and agree between slow and fast.
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __magic_name__ )
        self.assertIsInstance(processor_fast.tokenizer , __magic_name__ )
        # Image-processor configs survive the round-trip as well.
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __magic_name__ )
        self.assertIsInstance(processor_fast.image_processor , __magic_name__ )

    def lowerCamelCase (self ) -> List[Any]:
        '''from_pretrained honours extra kwargs (special tokens, image-processor overrides).'''
        snake_case_ : Any = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        snake_case_ : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        snake_case_ : Dict = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
        snake_case_ : List[str] = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__magic_name__ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __magic_name__ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __magic_name__ )

    def lowerCamelCase (self ) -> int:
        '''The processor's image path matches the bare image processor (sums compared within 1e-2).'''
        snake_case_ : Dict = self.get_image_processor()
        snake_case_ : str = self.get_tokenizer()
        snake_case_ : Dict = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
        snake_case_ : Optional[Any] = self.prepare_image_inputs()
        snake_case_ : int = image_processor(__magic_name__ , return_tensors='''np''' )
        snake_case_ : Any = processor(images=__magic_name__ , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''The processor's text path matches the bare tokenizer (max_length padding to 64).'''
        snake_case_ : Optional[Any] = self.get_image_processor()
        snake_case_ : str = self.get_tokenizer()
        snake_case_ : Any = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
        snake_case_ : List[Any] = '''lower newer'''
        snake_case_ : str = processor(text=__magic_name__ )
        snake_case_ : Optional[int] = tokenizer(__magic_name__ , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def lowerCamelCase (self ) -> Dict:
        '''Text+image call yields the expected keys; calling with no input raises.'''
        snake_case_ : Union[str, Any] = self.get_image_processor()
        snake_case_ : str = self.get_tokenizer()
        snake_case_ : str = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
        snake_case_ : Optional[int] = '''lower newer'''
        snake_case_ : Any = self.prepare_image_inputs()
        snake_case_ : List[str] = processor(text=__magic_name__ , images=__magic_name__ )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__magic_name__ ):
            processor()

    def lowerCamelCase (self ) -> Any:
        '''batch_decode is forwarded verbatim to the underlying tokenizer.'''
        snake_case_ : Any = self.get_image_processor()
        snake_case_ : Optional[Any] = self.get_tokenizer()
        snake_case_ : str = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
        snake_case_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        snake_case_ : List[str] = processor.batch_decode(__magic_name__ )
        snake_case_ : Union[str, Any] = tokenizer.batch_decode(__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )

    def lowerCamelCase (self ) -> Dict:
        '''Processor output keys match processor.model_input_names.'''
        snake_case_ : Dict = self.get_image_processor()
        snake_case_ : Any = self.get_tokenizer()
        snake_case_ : Union[str, Any] = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
        snake_case_ : Optional[int] = '''lower newer'''
        snake_case_ : Any = self.prepare_image_inputs()
        snake_case_ : int = processor(text=__magic_name__ , images=__magic_name__ )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
279
def _print_dist(dist, v) -> None:
    """Pretty-print the shortest-path matrix; unreachable pairs print as INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via Floyd-Warshall.

    :param graph: v x v adjacency matrix; ``float('inf')`` marks a missing edge.
    :param v: number of vertices.
    :return: tuple ``(dist, v)`` where ``dist[i][j]`` is the shortest distance
        from vertex i to vertex j (prints the matrix as a side effect).
    """
    # Seed with the direct-edge distances; work on a copy so the caller's
    # matrix is left untouched.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


# Backward-compatible alias: the previous public name resolved to the
# Floyd-Warshall entry point (it was the last definition bound to that name).
lowerCamelCase_ = floyd_warshall


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    # inf everywhere except the zero-cost diagonal
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[v][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # Edge 1: source 1, destination 2, weight 2
    # Edge 2: source 2, destination 1, weight 1
    # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
279
1
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> bool: """simple docstring""" snake_case_ : Optional[Any] = len(_UpperCamelCase ) snake_case_ : Optional[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): snake_case_ : Tuple = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): snake_case_ : Tuple = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: snake_case_ : Dict = subset[i - 1][j] if arr[i - 1] <= j: snake_case_ : Union[str, Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
279
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class __lowerCAmelCase :
    # Stub model whose forward takes three contiguous args
    # (used to exercise ensure_valid_input's reordering path).
    # NOTE(review): class names look machine-mangled — test bodies below refer
    # to FuncContiguousArgs / FuncNonContiguousArgs / OnnxExportTestCase, which
    # do not match these names. Verify against the original file.
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
        '''No-op forward stand-in; always returns None.'''
        return None


class __lowerCAmelCase :
    # Stub model with a fourth, interleaved argument (non-contiguous layout).
    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
        '''No-op forward stand-in; always returns None.'''
        return None


class __lowerCAmelCase ( unittest.TestCase ):
    # (model_name, model_kwargs) pairs exported by the slow tests below.
    lowerCamelCase_ : Dict = [
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def lowerCamelCase (self ) -> Union[str, Any]:
        '''Export each test model with the TensorFlow backend at opset 12.'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ )

    @require_torch
    @slow
    def lowerCamelCase (self ) -> int:
        '''Export each test model with the PyTorch backend at opset 12.'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ )

    @require_torch
    @slow
    def lowerCamelCase (self ) -> int:
        '''Export a freshly-initialised BERT built from a tiny custom vocab (PyTorch).'''
        from transformers import BertModel

        snake_case_ : str = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
        with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
            vocab_file.write('''\n'''.join(__magic_name__ ) )
            vocab_file.flush()
            snake_case_ : Optional[Any] = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            snake_case_ : str = BertModel(BertConfig(vocab_size=len(__magic_name__ ) ) )
            model.save_pretrained(__magic_name__ )
            self._test_export(__magic_name__ , '''pt''' , 12 , __magic_name__ )

    @require_tf
    @slow
    def lowerCamelCase (self ) -> Tuple:
        '''The quantized TF export must not be larger than the original ONNX file.'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            snake_case_ : Tuple = self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ )
            snake_case_ : List[str] = quantize(Path(__magic_name__ ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )

    @require_torch
    @slow
    def lowerCamelCase (self ) -> Any:
        '''The quantized PyTorch export must not be larger than the original ONNX file.'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            snake_case_ : Any = self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ )
            snake_case_ : Any = quantize(__magic_name__ )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Tuple:
        '''Run the ONNX conversion into a fresh temp path and return that path; fail the test on any exception.'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                snake_case_ : List[str] = Path(__magic_name__ ).joinpath('''model.onnx''' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
            return path
        except Exception as e:
            self.fail(__magic_name__ )

    @require_torch
    @require_tokenizers
    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        '''Dynamic-axis inference for a tiny PyTorch BERT.'''
        from transformers import BertModel

        snake_case_ : Optional[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        snake_case_ : int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''pt''' )

    @require_tf
    @require_tokenizers
    @slow
    def lowerCamelCase (self ) -> List[str]:
        '''Dynamic-axis inference for a tiny TensorFlow BERT.'''
        from transformers import TFBertModel

        snake_case_ : Any = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        snake_case_ : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''tf''' )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
        '''Check infer_shapes reports the expected variable names and dynamic axes.'''
        snake_case_ : Tuple = FeatureExtractionPipeline(__magic_name__ , __magic_name__ )
        snake_case_ : Optional[int] = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = infer_shapes(__magic_name__ , __magic_name__ )
        # Assert all variables are present
        self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , __magic_name__ )
        self.assertSequenceEqual(variable_names[3:] , __magic_name__ )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
        self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )

    def lowerCamelCase (self ) -> Optional[int]:
        '''ensure_valid_input reorders kwargs to the forward signature and drops args after a missing one.'''
        snake_case_ : Tuple = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        snake_case_ : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        snake_case_ , snake_case_ : Tuple = ensure_valid_input(FuncContiguousArgs() , __magic_name__ , __magic_name__ )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__magic_name__ ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(__magic_name__ ) , set(__magic_name__ ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__magic_name__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        snake_case_ , snake_case_ : Dict = ensure_valid_input(FuncNonContiguousArgs() , __magic_name__ , __magic_name__ )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__magic_name__ ) , 1 )
        self.assertEqual(len(__magic_name__ ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
        self.assertEqual(ordered_input_names[0] , '''input_ids''' )

    def lowerCamelCase (self ) -> Any:
        '''generate_identified_filename inserts the suffix before the file extension.'''
        snake_case_ : Optional[int] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
        self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
279
1
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Decode the most likely sequence of hidden states for ``observations_space``
    using the Viterbi dynamic-programming algorithm.

    Args:
        observations_space: ordered list of observed symbols.
        states_space: list of hidden-state names.
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> {state -> P(next | current)}.
        emission_probabilities: state -> {observation -> P(obs | state)}.

    Returns:
        The most probable hidden-state path, one state per observation.

    Raises:
        ValueError: if any argument is empty or of the wrong shape/type.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # probabilities[(state, observation)] = best path probability ending in
    # `state` at the time step of `observation`; pointers backtrack that path.
    probabilities: dict = {}
    pointers: dict = {}

    # Fill the initial step from the prior and the first emission.
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Forward pass: for each later observation pick the best predecessor state.
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards to recover the best path.
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


# Backward-compatible alias for the obfuscated public name.
lowerCamelCase_ = viterbi


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every argument of :func:`viterbi`; raise ValueError on failure."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Reject any empty/falsy parameter."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError('''There\'s an empty parameter''' )


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Both spaces must be lists of strings."""
    _validate_list(observations_space, '''observations_space''' )
    _validate_list(states_space, '''states_space''' )


def _validate_list(_object: Any, var_name: str) -> None:
    """Check that ``_object`` is a list whose elements are all strings."""
    if not isinstance(_object, list):
        msg = f'''{var_name} must be a list'''
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f'''{var_name} must be a list of strings'''
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Initial probabilities are {str: float}; the others are nested dicts."""
    _validate_dict(initial_probabilities, '''initial_probabilities''', float)
    _validate_nested_dict(transition_probabilities, '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities, '''emission_probabilities''' )


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check a {str: {str: float}} two-level mapping."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Check that ``_object`` is a dict with str keys and ``value_type`` values."""
    if not isinstance(_object, dict):
        msg = f'''{var_name} must be a dict'''
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f'''{var_name} all keys must be strings'''
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
279
lowerCAmelCase_ = { "joule": 1.0, "kilojoule": 1_0_0_0, "megajoule": 1_0_0_0_0_0_0, "gigajoule": 1_0_0_0_0_0_0_0_0_0, "wattsecond": 1.0, "watthour": 3_6_0_0, "kilowatthour": 3_6_0_0_0_0_0, "newtonmeter": 1.0, "calorie_nutr": 4_1_8_6.8, "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0, "electronvolt": 1.602_176_634e-19, "britishthermalunit_it": 1_0_5_5.0_5_5_8_5, "footpound": 1.355_818, } def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: snake_case_ : str = ( f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' f'''Valid values are: {", ".join(_UpperCamelCase )}''' ) raise ValueError(_UpperCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
279
1
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right).
# The repetitive per-layer appends of the original script are generated by loops below;
# the resulting (src, dest) pair set is identical (order is irrelevant — each key is
# popped/re-inserted exactly once).
rename_keys = []

# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
_ENCODER_SUBMODULES = {
    "self_attn.out_proj": "self_attn.out_proj",
    "linear1": "fc1",
    "linear2": "fc2",
    "norm1": "self_attn_layer_norm",
    "norm2": "final_layer_norm",
}
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
_DECODER_SUBMODULES = {
    "self_attn.out_proj": "self_attn.out_proj",
    "cross_attn.out_proj": "encoder_attn.out_proj",
    "linear1": "fc1",
    "linear2": "fc2",
    "norm1": "self_attn_layer_norm",
    "norm2": "encoder_attn_layer_norm",
    "norm3": "final_layer_norm",
}
# q, k, v projections in self/cross-attention in decoder for conditional DETR
# (ca_qpos_proj is deliberately absent for layers 1-5; layer 0's is added below.)
_CONDITIONAL_PROJECTIONS = [
    "sa_qcontent_proj",
    "sa_kcontent_proj",
    "sa_qpos_proj",
    "sa_kpos_proj",
    "sa_v_proj",
    "ca_qcontent_proj",
    "ca_kcontent_proj",
    "ca_kpos_proj",
    "ca_v_proj",
    "ca_qpos_sine_proj",
]

for i in range(6):
    for old, new in _ENCODER_SUBMODULES.items():
        for param in ("weight", "bias"):
            rename_keys.append(
                (f"transformer.encoder.layers.{i}.{old}.{param}", f"encoder.layers.{i}.{new}.{param}")
            )
    for old, new in _DECODER_SUBMODULES.items():
        for param in ("weight", "bias"):
            rename_keys.append(
                (f"transformer.decoder.layers.{i}.{old}.{param}", f"decoder.layers.{i}.{new}.{param}")
            )
    for name in _CONDITIONAL_PROJECTIONS:
        for param in ("weight", "bias"):
            rename_keys.append(
                (f"transformer.decoder.layers.{i}.{name}.{param}", f"decoder.layers.{i}.{name}.{param}")
            )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Rename one key of ``state_dict`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return a copy of ``state_dict`` with backbone keys moved under
    ``backbone.conv_encoder.model``."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused in_proj weight/bias into separate
    q/k/v projections (hidden size 256)."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer
        # (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Convert an original ConditionalDETR checkpoint (from torch hub) to the
    HuggingFace format, verify outputs match, push to hub and save locally."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the
    # head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # insert ".model" right after the "conditional_detr" prefix
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
279
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

# Mapping from config name to downloadable BLEURT checkpoint archive.
CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowerCAmelCase(datasets.Metric):
    """BLEURT metric wrapper around the google-research `bleurt` scorer."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # Resolve the checkpoint name case-insensitively against CHECKPOINT_URLS.
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
279
1
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    """Configuration for an EfficientNet model.

    Defaults correspond to google/efficientnet-b7. The list-valued arguments
    describe the per-stage block layout (kernel sizes, channel counts, strides,
    repeats, expansion ratios).
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # each block repeat contributes 4 hidden layers
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # single image input with fully dynamic axes
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
279
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''): lowerCAmelCase_ = { '''linear''': PIL.Image.Resampling.BILINEAR, '''bilinear''': PIL.Image.Resampling.BILINEAR, '''bicubic''': PIL.Image.Resampling.BICUBIC, '''lanczos''': PIL.Image.Resampling.LANCZOS, '''nearest''': PIL.Image.Resampling.NEAREST, } else: lowerCAmelCase_ = { '''linear''': PIL.Image.LINEAR, '''bilinear''': PIL.Image.BILINEAR, '''bicubic''': PIL.Image.BICUBIC, '''lanczos''': PIL.Image.LANCZOS, '''nearest''': PIL.Image.NEAREST, } def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : Dict = (images / 2 + 0.5).clamp(0 , 1 ) snake_case_ : Dict = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() snake_case_ : int = numpy_to_pil(_UpperCamelCase ) return images def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" if images.ndim == 3: snake_case_ : Optional[Any] = images[None, ...] snake_case_ : Any = (images * 255).round().astype('''uint8''' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images snake_case_ : str = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images] else: snake_case_ : List[Any] = [Image.fromarray(_UpperCamelCase ) for image in images] return pil_images
279
1
# Script: merge LoRA weights stored in a .safetensors file directly into a
# Stable Diffusion pipeline in diffusers format (W += alpha * up @ down).
# NOTE(review): obfuscated export -- locals were renamed to ``snake_case_``
# and parameters to ``_UpperCamelCase``, but the bodies still reference the
# original names (``state_dict``, ``visited``, ``pipeline``, ``curr_layer``,
# ``layer_infos``, ``temp_name``, ``pair_keys``, ``alpha``,
# ``LORA_PREFIX_TEXT_ENCODER``, ``LORA_PREFIX_UNET``, ``parser``, ``args``,
# ``convert``, ``pipe``), which are unresolved as written.
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
    """Load a base pipeline and add the scaled LoRA deltas to the matching
    attention weights of its text encoder and UNet, returning the pipeline."""
    snake_case_ : List[str] = StableDiffusionPipeline.from_pretrained(_UpperCamelCase , torch_dtype=torch.floataa )
    # load LoRA weight from .safetensors
    snake_case_ : Dict = load_file(_UpperCamelCase )
    snake_case_ : List[Any] = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            snake_case_ : List[str] = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
            snake_case_ : Optional[int] = pipeline.text_encoder
        else:
            snake_case_ : Optional[Any] = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
            snake_case_ : Union[str, Any] = pipeline.unet
        # find the target layer
        snake_case_ : Any = layer_infos.pop(0 )
        # NOTE(review): ``> -1`` makes this condition always true; the walk
        # only terminates through the ``break`` below.
        while len(_UpperCamelCase ) > -1:
            try:
                snake_case_ : Union[str, Any] = curr_layer.__getattr__(_UpperCamelCase )
                if len(_UpperCamelCase ) > 0:
                    snake_case_ : Any = layer_infos.pop(0 )
                elif len(_UpperCamelCase ) == 0:
                    break
            except Exception:
                # attribute names may themselves contain underscores: glue the
                # next fragment onto the current name and retry
                if len(_UpperCamelCase ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    snake_case_ : Tuple = layer_infos.pop(0 )
        snake_case_ : int = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(_UpperCamelCase )
        else:
            pair_keys.append(_UpperCamelCase )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            # conv layer: drop the trailing 1x1 spatial dims before the matmul,
            # then restore them on the delta
            snake_case_ : Tuple = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            snake_case_ : Dict = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(_UpperCamelCase , _UpperCamelCase ).unsqueeze(2 ).unsqueeze(3 )
        else:
            snake_case_ : int = state_dict[pair_keys[0]].to(torch.floataa )
            snake_case_ : Tuple = state_dict[pair_keys[1]].to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(_UpperCamelCase , _UpperCamelCase )
        # update visited list
        for item in pair_keys:
            visited.append(_UpperCamelCase )
    return pipeline


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()

    parser.add_argument(
        '''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
    )
    parser.add_argument(
        '''--lora_prefix_text_encoder''',
        default='''lora_te''',
        type=str,
        help='''The prefix of text encoder weight in safetensors''',
    )
    parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
    parser.add_argument(
        '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
    )
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')

    lowerCAmelCase_ = parser.parse_args()

    lowerCAmelCase_ = args.base_model_path
    lowerCAmelCase_ = args.checkpoint_path
    lowerCAmelCase_ = args.dump_path
    lowerCAmelCase_ = args.lora_prefix_unet
    lowerCAmelCase_ = args.lora_prefix_text_encoder
    lowerCAmelCase_ = args.alpha
    lowerCAmelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    lowerCAmelCase_ = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
279
# Unit tests for BioGptTokenizer (BPE-based).
# NOTE(review): obfuscated export -- ``_a`` is presumably
# ``TokenizerTesterMixin`` and the repeated ``__magic_name__`` references
# stand in for the original local names; unresolved as written.
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class __lowerCAmelCase ( _a, unittest.TestCase ):
    """BioGPT tokenizer test suite built on the common tokenizer-test mixin."""

    lowerCamelCase_ : Any = BioGptTokenizer
    lowerCamelCase_ : Optional[Any] = False

    def lowerCamelCase (self ) -> List[Any]:
        """Write a tiny BPE vocab and merges file into the mixin's temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ : Optional[Any] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
        snake_case_ : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

        snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(__magic_name__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(__magic_name__ ) )

    def lowerCamelCase (self , __magic_name__ ) -> int:
        """Return an (input, expected output) text pair for the common tests."""
        snake_case_ : str = '''lower newer'''
        snake_case_ : Dict = '''lower newer'''
        return input_text, output_text

    def lowerCamelCase (self ) -> Union[str, Any]:
        """Tokenization splits into BPE pieces; unknown tokens map to <unk>'s id."""
        snake_case_ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )

        snake_case_ : Union[str, Any] = '''lower'''
        snake_case_ : Optional[int] = ['''low''', '''er</w>''']
        snake_case_ : Any = tokenizer.tokenize(__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )

        snake_case_ : Optional[int] = tokens + ['''<unk>''']
        snake_case_ : List[str] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )

    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        """Special-token formatting: a BOS id (2) precedes each sequence."""
        snake_case_ : Optional[int] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )

        snake_case_ : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__magic_name__ )
        snake_case_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__magic_name__ )

        snake_case_ : str = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
        snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )

        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
279
1
# Notebook bootstrap metadata used when converting docs into notebooks
# (Korean translation of the Transformers installation cell).
lowerCAmelCase_ = '''
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# First cell injected into every generated notebook.
# NOTE(review): ``INSTALL_CONTENT`` is unresolved here -- the obfuscation
# collapsed the original constant names into the single ``lowerCAmelCase_``.
lowerCAmelCase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder substitutions applied to code samples in the docs.
lowerCAmelCase_ = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
279
from __future__ import annotations


def lowerCamelCase_(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken in order of decreasing value/weight ratio; the first item
    that no longer fits is taken fractionally and the scan stops.

    Fixed relative to the previous version: the three parameters shared one
    mangled name (a SyntaxError), the sort key closed over an undefined
    variable instead of its own argument, and ``reverse`` was not a bool.

    Args:
        value: profit of each item.
        weight: weight of each item (parallel to ``value``).
        capacity: total weight the knapsack can hold.

    Returns:
        ``(max_value, fractions)`` where ``fractions[i]`` is the fraction
        (0..1) of item ``i`` placed in the knapsack.

    >>> lowerCamelCase_([60, 100, 120], [10, 20, 30], 50)
    (240.0, [1, 1, 0.6666666666666666])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Best value-per-weight first; the lambda must use its own argument.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # item fits entirely
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take the fraction that fills the remaining capacity, then stop
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
279
1
def lowerCamelCase_() -> str:
    """Return the last ten digits of 1**1 + 2**2 + ... + 1000**1000.

    Project Euler problem 48 ("self powers"). Fixed: the previous version
    returned ``str(_UpperCamelCase)[-10:]`` where ``_UpperCamelCase`` was an
    undefined name; the accumulator ``total`` is what must be stringified.
    """
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]


# Backward-compatible alias: the __main__ guard below (and possibly external
# callers) refer to this function as ``solution``.
solution = lowerCamelCase_


if __name__ == "__main__":
    print(solution())
279
# Unit tests for GLPNImageProcessor.
# NOTE(review): obfuscated export -- ``_a`` is presumably
# ``ImageProcessingSavingTestMixin``, ``GLPNImageProcessingTester`` refers to
# the first (renamed) class below, and the repeated ``__magic_name__``
# references stand in for the original local names; unresolved as written.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class __lowerCAmelCase ( unittest.TestCase ):
    """Helper holding the knobs used to build processor kwargs and test inputs."""

    def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=32 , __magic_name__=True , ) -> Dict:
        """Record the test configuration on the helper instance."""
        snake_case_ : Tuple = parent
        snake_case_ : Union[str, Any] = batch_size
        snake_case_ : Union[str, Any] = num_channels
        snake_case_ : Optional[Any] = image_size
        snake_case_ : int = min_resolution
        snake_case_ : Any = max_resolution
        snake_case_ : Tuple = do_resize
        snake_case_ : str = size_divisor
        snake_case_ : Optional[Any] = do_rescale

    def lowerCamelCase (self ) -> Dict:
        """kwargs forwarded to the GLPNImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
    """GLPN image-processor test suite."""

    lowerCamelCase_ : Optional[Any] = GLPNImageProcessor if is_vision_available() else None

    def lowerCamelCase (self ) -> Optional[Any]:
        """Create the shared tester helper."""
        snake_case_ : str = GLPNImageProcessingTester(self )

    @property
    def lowerCamelCase (self ) -> Union[str, Any]:
        """Processor kwargs derived from the tester configuration."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCamelCase (self ) -> List[str]:
        """The processor exposes its configuration attributes."""
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''size_divisor''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''resample''' ) )
        self.assertTrue(hasattr(__magic_name__ , '''do_rescale''' ) )

    def lowerCamelCase (self ) -> List[Any]:
        """Intentionally skipped in the original suite."""
        pass

    def lowerCamelCase (self ) -> int:
        """PIL input: output height/width are multiples of size_divisor."""
        snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , Image.Image )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def lowerCamelCase (self ) -> Dict:
        """numpy input: same divisibility guarantee."""
        snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , np.ndarray )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        snake_case_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def lowerCamelCase (self ) -> Optional[Any]:
        """torch input: same divisibility guarantee."""
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , torch.Tensor )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
279
1
# DPT image-processing module head: imports plus the resize-target helper.
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


lowerCAmelCase_ = logging.get_logger(__name__)


def lowerCamelCase_(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    """Compute the (height, width) an image should be resized to.

    Each side of ``output_size`` is scaled relative to the input image and
    snapped to the nearest multiple of ``multiple``. With ``keep_aspect_ratio``
    the smaller relative change is applied to both sides.

    Fixed relative to the previous version: all four parameters shared one
    mangled name (a SyntaxError) and the bodies referenced undefined locals
    (``x``, ``input_height``, ``scale_width``, ...); names are restored.

    Args:
        input_image: image whose current size is read via ``get_image_size``.
        output_size: target size, either an int (square) or (height, width).
        keep_aspect_ratio: scale both sides by the same factor when True.
        multiple: snap each output side to a multiple of this value.

    Returns:
        The ``(new_height, new_width)`` tuple.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, preferring to stay within bounds:
        # floor when rounding overshoots max_val, ceil when below min_val.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class __lowerCAmelCase ( _a ):
    """DPT-style image processor: resize (optionally keeping aspect ratio and
    snapping each side to a multiple), rescale and normalize images, plus
    semantic-segmentation post-processing.

    NOTE(review): obfuscated export -- parameters were all renamed to the
    repeated ``__magic_name__`` (duplicate argument names are a SyntaxError)
    and locals to ``snake_case_``, while the bodies still read the original
    names (``size``, ``do_resize``, ``image_mean``, ...), which are
    unresolved as written. ``_a`` is presumably ``BaseImageProcessor``.
    """

    # ``model_input_names`` in the original source.
    lowerCamelCase_ : Any = ['''pixel_values''']

    def __init__(self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = False , __magic_name__ = 1 , __magic_name__ = True , __magic_name__ = 1 / 255 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ) -> None:
        """Store the default preprocessing configuration on the instance."""
        super().__init__(**__magic_name__ )
        snake_case_ : List[Any] = size if size is not None else {'''height''': 384, '''width''': 384}
        snake_case_ : List[str] = get_size_dict(__magic_name__ )
        snake_case_ : Dict = do_resize
        snake_case_ : Optional[int] = size
        snake_case_ : int = keep_aspect_ratio
        snake_case_ : Optional[Any] = ensure_multiple_of
        snake_case_ : str = resample
        snake_case_ : Union[str, Any] = do_rescale
        snake_case_ : List[Any] = rescale_factor
        snake_case_ : List[str] = do_normalize
        snake_case_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = False , __magic_name__ = 1 , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ) -> np.ndarray:
        """Resize one image to ``size``, optionally keeping aspect ratio and
        snapping each output side to a multiple."""
        snake_case_ : Optional[int] = get_size_dict(__magic_name__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        snake_case_ : int = get_resize_output_image_size(
            __magic_name__ ,
            output_size=(size['''height'''], size['''width''']) ,
            keep_aspect_ratio=__magic_name__ ,
            multiple=__magic_name__ ,
        )
        return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ) -> Optional[Any]:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ) -> np.ndarray:
        """Normalize one image with per-channel mean and std."""
        return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ) -> PIL.Image.Image:
        """Full preprocessing pipeline: fall back to instance defaults, convert
        inputs to numpy, then resize / rescale / normalize and package the
        result as a ``BatchFeature``."""
        snake_case_ : Dict = do_resize if do_resize is not None else self.do_resize
        snake_case_ : int = size if size is not None else self.size
        snake_case_ : List[str] = get_size_dict(__magic_name__ )
        snake_case_ : str = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        snake_case_ : Any = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        snake_case_ : int = resample if resample is not None else self.resample
        snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : List[str] = image_mean if image_mean is not None else self.image_mean
        snake_case_ : Dict = image_std if image_std is not None else self.image_std
        snake_case_ : int = make_list_of_images(__magic_name__ )
        if not valid_images(__magic_name__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )
        # NOTE(review): ``and`` binds tighter than ``or``, so this reads as
        # ``(do_resize and size is None) or resample is None`` -- likely not
        # the intended validation; confirm against upstream before changing.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        snake_case_ : Optional[int] = [to_numpy_array(__magic_name__ ) for image in images]
        if do_resize:
            snake_case_ : Any = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
        if do_rescale:
            snake_case_ : Tuple = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
        if do_normalize:
            snake_case_ : Optional[int] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
        snake_case_ : List[str] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
        snake_case_ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )

    def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Any:
        """Turn model ``outputs.logits`` into per-image segmentation maps,
        optionally bilinearly resized to ``target_sizes`` before the argmax."""
        snake_case_ : Optional[int] = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(__magic_name__ ) != len(__magic_name__ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits'''
                )
            if is_torch_tensor(__magic_name__ ):
                snake_case_ : List[Any] = target_sizes.numpy()
            snake_case_ : Union[str, Any] = []
            for idx in range(len(__magic_name__ ) ):
                snake_case_ : int = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__magic_name__ )
                snake_case_ : Any = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(__magic_name__ )
        else:
            snake_case_ : str = logits.argmax(dim=1 )
            snake_case_ : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
279
# Lazy-guarded exports for the UnCLIP pipeline package: fall back to dummy
# placeholder objects when torch or a recent-enough transformers is missing,
# so that ``import diffusers`` still succeeds without the optional deps.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP needs both torch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholders that raise a helpful error only when instantiated.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
279
1
# GPTBigCode model configuration.
# NOTE(review): obfuscated export -- ``_a`` is presumably ``PretrainedConfig``.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Module logger; rebound below to the archive map by the obfuscation.
lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}


class __lowerCAmelCase ( _a ):
    """Configuration holding the transformer hyper-parameters of a GPTBigCode model."""

    # ``model_type`` / ``keys_to_ignore_at_inference`` / ``attribute_map``
    # in the original source.
    lowerCamelCase_ : List[str] = '''gpt_bigcode'''
    lowerCamelCase_ : Dict = ['''past_key_values''']
    lowerCamelCase_ : List[str] = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    # NOTE(review): every parameter was mangled to the same name
    # ``__magic_name__`` (a SyntaxError) while the body still reads the
    # original names (``vocab_size``, ``n_positions``, ...), unresolved here.
    def __init__(self , __magic_name__=5_0257 , __magic_name__=1024 , __magic_name__=768 , __magic_name__=12 , __magic_name__=12 , __magic_name__=None , __magic_name__="gelu_pytorch_tanh" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0256 , __magic_name__=5_0256 , __magic_name__=True , __magic_name__=True , __magic_name__=True , **__magic_name__ , ) -> Optional[int]:
        """Store the transformer hyper-parameters on the config instance."""
        snake_case_ : List[Any] = vocab_size
        snake_case_ : int = n_positions
        snake_case_ : Optional[Any] = n_embd
        snake_case_ : List[str] = n_layer
        snake_case_ : Optional[int] = n_head
        snake_case_ : Any = n_inner
        snake_case_ : List[Any] = activation_function
        snake_case_ : Optional[Any] = resid_pdrop
        snake_case_ : Dict = embd_pdrop
        snake_case_ : List[str] = attn_pdrop
        snake_case_ : Any = layer_norm_epsilon
        snake_case_ : Union[str, Any] = initializer_range
        snake_case_ : str = scale_attn_weights
        snake_case_ : Union[str, Any] = use_cache
        snake_case_ : Any = attention_softmax_in_fpaa
        snake_case_ : Optional[int] = scale_attention_softmax_in_fpaa
        snake_case_ : Optional[int] = multi_query

        snake_case_ : List[Any] = bos_token_id
        snake_case_ : Union[str, Any] = eos_token_id

        super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
279
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase_ = float('''nan''') class __lowerCAmelCase : def __init__(self , __magic_name__ ) -> int: '''simple docstring''' snake_case_ : List[Any] = sys.stdout snake_case_ : int = open(__magic_name__ , '''a''' ) def __getattr__(self , __magic_name__ ) -> Dict: '''simple docstring''' return getattr(self.stdout , __magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' self.stdout.write(__magic_name__ ) # strip tqdm codes self.file.write(re.sub(R'''^.*\r''' , '''''' , __magic_name__ , 0 , re.M ) ) def lowerCamelCase_ ( _UpperCamelCase=80 , _UpperCamelCase=False ) -> str: """simple docstring""" snake_case_ : str = [] # deal with critical env vars snake_case_ : int = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: snake_case_ : Optional[int] = os.environ.get(_UpperCamelCase , _UpperCamelCase ) if val is not None: cmd.append(f'''{key}={val}''' ) # python executable (not always needed if the script is executable) snake_case_ : Optional[int] = sys.executable if 
full_python_path else sys.executable.split('''/''' )[-1] cmd.append(_UpperCamelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes snake_case_ : Dict = [] snake_case_ : Dict = '''''' while len(_UpperCamelCase ) > 0: current_line += f'''{cmd.pop(0 )} ''' if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(_UpperCamelCase ) snake_case_ : List[Any] = '''''' return "\\\n".join(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]: """simple docstring""" snake_case_ : str = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd ) # remove --output_dir if any and set our own snake_case_ : Optional[Any] = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd ) args.base_cmd += f''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir snake_case_ : int = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , ) snake_case_ : Tuple = subprocess.run(_UpperCamelCase , capture_output=_UpperCamelCase , text=_UpperCamelCase ) if verbose: print('''STDOUT''' , result.stdout ) print('''STDERR''' , result.stderr ) # save the streams snake_case_ : Any = variation.replace(''' ''' , '''-''' ) with open(Path(_UpperCamelCase ) / f'''log.{prefix}.stdout.txt''' , '''w''' ) as f: f.write(result.stdout ) with open(Path(_UpperCamelCase ) / f'''log.{prefix}.stderr.txt''' , '''w''' ) as f: f.write(result.stderr ) 
if result.returncode != 0: if verbose: print('''failed''' ) return {target_metric_key: nan} with io.open(f'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f: snake_case_ : str = json.load(_UpperCamelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Tuple: """simple docstring""" snake_case_ : Tuple = [] snake_case_ : Any = [] snake_case_ : int = f'''{id}: {variation:<{longest_variation_len}}''' snake_case_ : Optional[Any] = f'''{preamble}: ''' snake_case_ : Optional[int] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(_UpperCamelCase ) , desc=_UpperCamelCase , leave=_UpperCamelCase ): snake_case_ : int = process_run_single( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ : List[str] = single_run_metrics[target_metric_key] if not math.isnan(_UpperCamelCase ): metrics.append(_UpperCamelCase ) results.append(_UpperCamelCase ) outcome += "✓" else: outcome += "✘" snake_case_ : Any = f'''\33[2K\r{outcome}''' if len(_UpperCamelCase ) > 0: snake_case_ : List[Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} snake_case_ : Any = round(mean_metrics[target_metric_key] , 2 ) snake_case_ : List[str] = f'''{outcome} {mean_target}''' if len(_UpperCamelCase ) > 1: results_str += f''' {tuple(round(_UpperCamelCase , 2 ) for x in results )}''' print(_UpperCamelCase ) snake_case_ : Optional[int] = variation return mean_metrics else: print(_UpperCamelCase ) return {variation_key: variation, target_metric_key: nan} def lowerCamelCase_ ( ) -> Optional[int]: """simple docstring""" snake_case_ : Any = torch.cuda.get_device_properties(torch.device('''cuda''' ) ) return f''' Datetime 
: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" snake_case_ : str = pd.DataFrame(_UpperCamelCase ) snake_case_ : Optional[int] = '''variation''' snake_case_ : Union[str, Any] = '''diff_%''' snake_case_ : Optional[int] = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan snake_case_ : Optional[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(_UpperCamelCase ): # as a fallback, use the minimal value as the sentinel snake_case_ : Any = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(_UpperCamelCase ): snake_case_ : Dict = df.apply( lambda _UpperCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='''columns''' , ) # re-order columns snake_case_ : Dict = [variation_key, target_metric_key, diff_key, *report_metric_keys] snake_case_ : int = df.reindex(_UpperCamelCase , axis='''columns''' ) # reorder cols # capitalize snake_case_ : Optional[int] = df.rename(str.capitalize , axis='''columns''' ) # make the cols as narrow as possible snake_case_ : Any = df.rename(lambda _UpperCamelCase : c.replace('''_''' , '''<br>''' ) , axis='''columns''' ) snake_case_ : int = df.rename(lambda _UpperCamelCase : c.replace('''_''' , '''\n''' ) , axis='''columns''' ) snake_case_ : Tuple = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** 
Results:", df_github.to_markdown(index=_UpperCamelCase , floatfmt='''.2f''' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=_UpperCamelCase , floatfmt='''.2f''' )] print('''\n\n'''.join(_UpperCamelCase ) ) def lowerCamelCase_ ( ) -> Any: """simple docstring""" snake_case_ : Any = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Base cmd''' , ) parser.add_argument( '''--variations''' , default=_UpperCamelCase , type=_UpperCamelCase , nargs='''+''' , required=_UpperCamelCase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , ) parser.add_argument( '''--base-variation''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , ) parser.add_argument( '''--target-metric-key''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , ) parser.add_argument( '''--report-metric-keys''' , default='''''' , type=_UpperCamelCase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples''' , ) parser.add_argument( '''--repeat-times''' , default=1 , type=_UpperCamelCase , help='''How many times to re-run each variation - an average will be reported''' , ) parser.add_argument( '''--output_dir''' , default='''output_benchmark''' , type=_UpperCamelCase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , ) parser.add_argument( '''--verbose''' , default=_UpperCamelCase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , ) snake_case_ : Tuple = parser.parse_args() snake_case_ : Optional[Any] = args.output_dir Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) snake_case_ : Optional[int] = get_base_command(_UpperCamelCase , _UpperCamelCase ) # split each dimension into its --foo variations snake_case_ : Optional[int] = [list(map(str.strip , re.split(R'''\|''' , _UpperCamelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty snake_case_ : List[str] = list(map(str.strip , map(''' '''.join , itertools.product(*_UpperCamelCase ) ) ) ) snake_case_ : Optional[int] = max(len(_UpperCamelCase ) for x in variations ) # split wanted keys snake_case_ : int = args.report_metric_keys.split() # capture prints into a log file for convenience snake_case_ : str = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(f'''and this script\'s output is also piped into {report_fn}''' ) snake_case_ : Tuple = Tee(_UpperCamelCase ) print(f'''\n*** Running {len(_UpperCamelCase )} benchmarks:''' ) print(f'''Base command: {" ".join(_UpperCamelCase )}''' ) snake_case_ : 
List[Any] = '''variation''' snake_case_ : Tuple = [] for id, variation in enumerate(tqdm(_UpperCamelCase , desc='''Total completion: ''' , leave=_UpperCamelCase ) ): snake_case_ : Optional[Any] = base_cmd + variation.split() results.append( process_run( id + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , args.target_metric_key , _UpperCamelCase , args.repeat_times , _UpperCamelCase , args.verbose , ) ) process_results(_UpperCamelCase , args.target_metric_key , _UpperCamelCase , args.base_variation , _UpperCamelCase ) if __name__ == "__main__": main()
279
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    """Builds the kwargs dict used to instantiate a GLPNImageProcessor under test."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the constructor kwargs for the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests GLPNImageProcessor on PIL, numpy and torch inputs."""

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        # output spatial dims must be multiples of size_divisor
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
279
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--txt2img_unclip''', default='''kakaobrain/karlo-v1-alpha''', type=str, required=False, help='''The pretrained txt2img unclip.''', ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) lowerCAmelCase_ = CLIPImageProcessor() lowerCAmelCase_ = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''') lowerCAmelCase_ = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
279
1
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall, backed by `tokenizers`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        # pair of sequences: <bos> A <eos> <eos> B <eos>
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (Blenderbot does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
279
from math import factorial

# Pre-computed factorial of every decimal digit, keyed by the digit character.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of *number*'s decimal digits.

    Raises:
        TypeError: if *number* is not an int.
        ValueError: if *number* is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Project Euler 74: count starting numbers below *number_limit* whose
    digit-factorial chain contains exactly *chain_length* non-repeating terms.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
279
1
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class __lowerCAmelCase ( _a ): lowerCamelCase_ : Union[str, Any] = CustomTokenizer pass
279
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # name of the single inner file: compressed name with its extension stripped
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        """Populate dir_cache with the single inner file's info (fsspec hook)."""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        # there is only one file inside, so path is ignored beyond protocol stripping
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        # install the wrapper so `with fs.file as f:` yields the wrapped object
        self.file.__enter__ = fixed_enter
279
1