Dataset schema (each row below carries these five fields, in this order):

    code                     string  (lengths 87 to 55.2k)
    code_codestyle           int64   (values 0 to 349)
    style_context            string  (lengths 135 to 49.1k)
    style_context_codestyle  int64   (values 0 to 349)
    label                    int64   (values 0 to 1)
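The schema appears to pair each `code` sample with a `style_context` sample, tag each with an integer code-style id, and attach a binary `label`. Below is a minimal sketch of how rows with this schema could be inspected, assuming the dump was exported from a dataset loadable with the Hugging Face `datasets` library; the dataset path is a hypothetical placeholder, since the dump does not name its source.

from datasets import load_dataset

# "some-user/code-style-pairs" is a hypothetical placeholder, not the real dataset name.
ds = load_dataset("some-user/code-style-pairs", split="train")
print(ds.features)  # expect: code, code_codestyle, style_context, style_context_codestyle, label

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                     # code sample length and its style id
print(len(row["style_context"]), row["style_context_codestyle"])  # paired context and its style id
print(row["label"])                                                # binary label, 0 or 1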
code (row 1):

# Integration test for the VersatileDiffusion image-variation pipeline (diffusers).
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


# The obfuscated dump only shows a module-level name assigned False; this target is a
# reconstruction (diffusers tests of this era disabled TF32 matmuls for reproducibility).
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
code_codestyle: 92
style_context (row 1):

# Tests for the TF VisionTextDualEncoder model (transformers), combining several
# vision backbones (ViT, DeiT, CLIP vision) with text backbones (BERT, RoBERTa).
from __future__ import annotations

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester


if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


def to_2tuple(x):
    # Return x unchanged if it is already iterable, otherwise duplicate it into a pair.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
style_context_codestyle: 92
label: 1
code (row 2):

# Configuration class for Speech2Text models (transformers).
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
code_codestyle: 92
style_context (row 2):

# One-off script converting "diffuser" RL checkpoints (hopper-medium-v2) into
# diffusers UNet1DModel weights plus config files, ready for the Hub layout.
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Rename keys positionally from the source checkpoint to the diffusers model.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # this checkpoint is already a raw state dict
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
style_context_codestyle: 92
label: 1
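A minimal usage sketch for the configuration class in row 2's `code` field, assuming a transformers release that ships Speech2TextConfig:

from transformers import Speech2TextConfig

config = Speech2TextConfig()  # defaults as in the __init__ signature above
print(config.d_model)         # 256
print(config.hidden_size)     # also 256: attribute_map aliases hidden_size to d_model
print(len(config.conv_kernel_sizes) == config.num_conv_layers)  # True, enforced in __init__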
code (row 3):

def or_gate(input_1: int, input_2: int) -> int:
    # OR is 1 whenever at least one input is 1.
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
code_codestyle: 92
style_context (row 3):

# Checks that the deprecated metric entry points in `datasets` emit a warning
# pointing users to the `evaluate` library.
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # The warning category was garbled in the dump; FutureWarning is the reconstruction.
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
style_context_codestyle: 92
label: 1
code (row 4):

# Creates a "student" seq2seq model by copying a subset of a teacher's layers
# (transformers seq2seq-distillation research project).
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
code_codestyle: 92
style_context (row 4):

# In-place randomized quicksort that also counts element comparisons,
# demonstrated on 100 samples from a standard normal distribution.
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted


mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print("No of Comparisons for 100 elements selected from a standard normal distribution is:")
print(z)
style_context_codestyle: 92
label: 1
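A quick sanity example for the layer-picking helper in row 4's `code` field, runnable alongside that module's definitions:

print(pick_layers_to_copy(n_student=3, n_teacher=12))  # [0, 6, 11], read from LAYERS_TO_COPY
print(pick_layers_to_copy(n_student=5, n_teacher=12))  # no table entry: warns, returns [0, 1, 2, 3, 4]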
code (row 5):

# Project Euler problem 101: sum the first incorrect terms (FITs) of the optimum
# polynomials fitted to prefixes of a degree-10 generating function.
from __future__ import annotations

from collections.abc import Callable


Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    # Gaussian elimination with partial pivoting, then back substitution.
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [[round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 92
style_context (row 5):

# Lazy-import __init__ for the Audio Spectrogram Transformer (AST) model in transformers.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 92
label: 1
code (row 6):

def multiplicative_persistence(num: int) -> int:
    # Number of times the digits must be multiplied together before a single digit remains.
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1

    return steps


def additive_persistence(num: int) -> int:
    # Number of times the digits must be summed before a single digit remains.
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1

    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 92
style_context (row 6):

# Release helper script for transformers: bumps version strings across the repo
# and cleans the README model list before/after a release.
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    # Replace links to the main docs with links to the stable docs in the README model list.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
style_context_codestyle: 92
label: 1
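Worked examples for the two persistence functions in row 6's `code` field: 217 -> 2*1*7 = 14 -> 1*4 = 4, two multiplicative steps; 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, three additive steps.

print(multiplicative_persistence(217))  # 2
print(additive_persistence(199))        # 3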
code (row 7):

import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # Deprecated alias kept for backward compatibility; the warning category
        # (FutureWarning) is reconstructed, as it was garbled in the dump.
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
code_codestyle: 92
style_context (row 7):

# Fast and slow tests for the Kandinsky image-to-image pipeline (diffusers).
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
style_context_codestyle: 92
label: 1
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a__ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = scope __lowerCAmelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = num_patches + 2 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTModel(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTForMaskedImageModeling(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, 
self.image_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForMaskedImageModeling(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = self.type_sequence_label_size __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a__ ( snake_case__ , snake_case__ , unittest.TestCase ): _a : Optional[Any] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _a : int = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _a : Optional[Any] = False _a : Tuple = False _a : Tuple = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def __SCREAMING_SNAKE_CASE( 
self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ): """simple docstring""" __lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.model_tester.is_training: return __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_A ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __lowerCAmelCase = False __lowerCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue __lowerCAmelCase = model_class(_A ) model.gradient_checkpointing_enable() model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_A ), *get_values(_A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): __lowerCAmelCase = problem_type["title"] __lowerCAmelCase = problem_type["num_labels"] __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) if problem_type["num_labels"] > 1: __lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) __lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_A ) as warning_list: __lowerCAmelCase = model(**_A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = DeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _a ( ): __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( _A ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ).to(_A ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**_A ) # verify the logits __lowerCAmelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) __lowerCAmelCase = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ) __lowerCAmelCase = inputs.pixel_values.to(_A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __lowerCAmelCase = model(_A )
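A quick arithmetic sanity check (editorial addition, not part of the test file) of the sequence-length bookkeeping the DeiT tester above relies on, using its default values:

image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225 patches
seq_length = num_patches + 2  # plus the [CLS] and distillation tokens
assert seq_length == 227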
92
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three FIFO queues, one per priority level (0 is the highest priority)."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        # Serve the highest-priority non-empty queue first
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """A single queue in which the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    # only 9 items were enqueued, so this final dequeue raises UnderFlowError
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
92
1
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in one file, using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main doc with links to the stable doc in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
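A minimal standalone check (sample strings and the target version "4.31.0" are assumed for illustration) of how one REPLACE_PATTERNS entry rewrites a version: the "VERSION" placeholder in the template is filled in first, then the compiled regex substitutes it into the file contents.

import re

pattern, template = (
    re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE),
    '__version__ = "VERSION"\n',
)
replace = template.replace("VERSION", "4.31.0")
code = '__version__ = "4.30.0.dev0"\n'
print(pattern.sub(replace, code).strip())  # __version__ = "4.31.0"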
92
from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase__ = logging.get_logger(__name__) class a__ ( snake_case__ ): _a : Union[str, Any] = ["""pixel_values"""] def __init__( self , _A = True , _A = 3_2 , _A=PILImageResampling.BILINEAR , _A = True , **_A , ): """simple docstring""" __lowerCAmelCase = do_resize __lowerCAmelCase = do_rescale __lowerCAmelCase = size_divisor __lowerCAmelCase = resample super().__init__(**_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A = None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = get_image_size(_A ) # Rounds the height and width down to the closest multiple of size_divisor __lowerCAmelCase = height // size_divisor * size_divisor __lowerCAmelCase = width // size_divisor * size_divisor __lowerCAmelCase = resize(_A , (new_h, new_w) , resample=_A , data_format=_A , **_A ) return image def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = None , **_A ): """simple docstring""" return rescale(image=_A , scale=_A , data_format=_A , **_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = None , _A=None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ): """simple docstring""" __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCAmelCase = size_divisor if size_divisor is not None else self.size_divisor __lowerCAmelCase = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError("size_divisor is required for resizing" ) __lowerCAmelCase = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError("Invalid image(s)" ) # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(_A ) for img in images] if do_resize: __lowerCAmelCase = [self.resize(_A , size_divisor=_A , resample=_A ) for image in images] if do_rescale: __lowerCAmelCase = [self.rescale(_A , scale=1 / 2_5_5 ) for image in images] __lowerCAmelCase = [to_channel_dimension_format(_A , _A ) for image in images] __lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=_A , tensor_type=_A )
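Standalone illustration (numbers assumed) of the size_divisor rounding performed in resize() above: height and width are floored to the nearest multiple of size_divisor.

size_divisor = 32
height, width = 513, 1023
new_h = height // size_divisor * size_divisor  # 512
new_w = width // size_divisor * size_divisor   # 992
assert (new_h, new_w) == (512, 992)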
92
def solution(limit: int = 1000000) -> int:
    """Sum of Euler's totient phi(d) for d = 2..limit, computed with a sieve."""
    # phi[i] starts at i - 1, which is already correct for every prime i;
    # the sieve below fixes up the composites.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: no smaller prime has touched it
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
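A brute-force cross-check (editorial addition, assuming the sieve is exposed as solution() as above) on a small limit, counting coprime pairs directly:

from math import gcd


def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


assert solution(10) == sum(phi_naive(d) for d in range(2, 11)) == 31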
92
1
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class a__ ( snake_case__ ): _a : Union[str, Any] = """bart""" _a : List[str] = ["""past_key_values"""] _a : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , _A=5_0_2_6_5 , _A=1_0_2_4 , _A=1_2 , _A=4_0_9_6 , _A=1_6 , _A=1_2 , _A=4_0_9_6 , _A=1_6 , _A=0.0 , _A=0.0 , _A="gelu" , _A=1_0_2_4 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.02 , _A=0.0 , _A=False , _A=True , _A=3 , _A=1 , _A=0 , _A=2 , _A=True , _A=2 , _A=2 , **_A , ): """simple docstring""" __lowerCAmelCase = vocab_size __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = d_model __lowerCAmelCase = encoder_ffn_dim __lowerCAmelCase = encoder_layers __lowerCAmelCase = encoder_attention_heads __lowerCAmelCase = decoder_ffn_dim __lowerCAmelCase = decoder_layers __lowerCAmelCase = decoder_attention_heads __lowerCAmelCase = dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = activation_dropout __lowerCAmelCase = activation_function __lowerCAmelCase = init_std __lowerCAmelCase = encoder_layerdrop __lowerCAmelCase = decoder_layerdrop __lowerCAmelCase = classifier_dropout __lowerCAmelCase = use_cache __lowerCAmelCase = encoder_layers __lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , _A ): __lowerCAmelCase = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." ) class a__ ( snake_case__ ): @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: __lowerCAmelCase = {0: "batch"} __lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"} else: __lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} __lowerCAmelCase = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(_A , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
__lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: __lowerCAmelCase , __lowerCAmelCase = self.num_layers for i in range(_A ): __lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} __lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} else: __lowerCAmelCase = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __lowerCAmelCase = super().outputs else: __lowerCAmelCase = super(_A , self ).outputs if self.use_past: __lowerCAmelCase , __lowerCAmelCase = self.num_layers for i in range(_A ): __lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} __lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ): """simple docstring""" __lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _A , _A , _A , _A , _A ) # Generate decoder inputs __lowerCAmelCase = seq_length if not self.use_past else 1 __lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _A , _A , _A , _A , _A ) __lowerCAmelCase = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} __lowerCAmelCase = dict(**_A , **_A ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch __lowerCAmelCase , __lowerCAmelCase = common_inputs["input_ids"].shape __lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1] __lowerCAmelCase , __lowerCAmelCase = self.num_attention_heads __lowerCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowerCAmelCase = decoder_seq_length + 3 __lowerCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowerCAmelCase = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(_A , _A )] , dim=1 ) __lowerCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowerCAmelCase , __lowerCAmelCase = self.num_layers __lowerCAmelCase = min(_A , _A ) __lowerCAmelCase = max(_A , _A ) - min_num_layers __lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(_A ): common_inputs["past_key_values"].append( ( torch.zeros(_A ), torch.zeros(_A ), torch.zeros(_A ), torch.zeros(_A ), ) ) # TODO: test this. __lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(_A , _A ): common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) ) return common_inputs def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ): """simple docstring""" __lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _A , _A , _A , _A , _A ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch __lowerCAmelCase , __lowerCAmelCase = common_inputs["input_ids"].shape # Not using the same length for past_key_values __lowerCAmelCase = seqlen + 2 __lowerCAmelCase , __lowerCAmelCase = self.num_layers __lowerCAmelCase , __lowerCAmelCase = self.num_attention_heads __lowerCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowerCAmelCase = common_inputs["attention_mask"].dtype __lowerCAmelCase = torch.cat( [common_inputs["attention_mask"], torch.ones(_A , _A , dtype=_A )] , dim=1 ) __lowerCAmelCase = [ (torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A ) ] return common_inputs def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ): """simple docstring""" __lowerCAmelCase = compute_effective_axis_dimension( _A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowerCAmelCase = tokenizer.num_special_tokens_to_add(_A ) __lowerCAmelCase = compute_effective_axis_dimension( _A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A ) # Generate dummy inputs according to compute batch and sequence __lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size __lowerCAmelCase = dict(tokenizer(_A , return_tensors=_A ) ) return common_inputs def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) elif self.task == "causal-lm": __lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) else: __lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A ) return common_inputs def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __lowerCAmelCase = super()._flatten_past_key_values_(_A , _A , _A , _A ) else: __lowerCAmelCase = super(_A , self )._flatten_past_key_values_( _A , _A , _A , _A )
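Back-of-the-envelope check (illustrative numbers only) of the cached key/value shapes assembled above: each past_key_values tensor is (batch, num_heads, seq_len, hidden_size // num_heads).

batch, num_heads, seq_len, hidden_size = 2, 16, 8, 1024
past_shape = (batch, num_heads, seq_len, hidden_size // num_heads)
assert past_shape == (2, 16, 8, 64)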
92
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
92
1
def mf_knapsack(i, wt, val, j):
    """Top-down (memoized) 0/1 knapsack using the global table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp  # w_ == w once the loops finish


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
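An independent exhaustive check (editorial addition) that the DP answer for the example instance matches brute-force search over all subsets:

from itertools import combinations


def knapsack_brute_force(w: int, wt: list, val: list) -> int:
    n = len(wt)
    best = 0
    for r in range(n + 1):
        for subset in combinations(range(n), r):
            if sum(wt[i] for i in subset) <= w:
                best = max(best, sum(val[i] for i in subset))
    return best


assert knapsack_brute_force(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8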
92
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType UpperCamelCase__ = get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if accelerator.process_index == 0: logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {ckpt_dir}""" ) __lowerCAmelCase = {"model": state_dict} dist_cp.save_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Model saved to {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = 
torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) if F"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading model from {ckpt_dir}""" ) __lowerCAmelCase = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , ) __lowerCAmelCase = state_dict["model"] logger.info(F"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state saved in {output_optimizer_file}""" ) else: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Optimizer state saved in {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = 
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" ) else: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if F"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading Optimizer from {ckpt_dir}""" ) __lowerCAmelCase = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , ) __lowerCAmelCase = optim_state["optimizer"] logger.info(F"""Optimizer loaded from {ckpt_dir}""" ) __lowerCAmelCase = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
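Illustrative-only sketch of the checkpoint naming scheme the helpers above use for the three FSDP state-dict types (MODEL_NAME, the indices, and the rank are assumed values for this demo; the real constants come from accelerate).

MODEL_NAME, model_index, rank = "pytorch_model", 1, 3
full_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
local_name = f"{MODEL_NAME}_{model_index}_rank{rank}.bin"
sharded_ckpt_dir = f"{MODEL_NAME}_{model_index}"
assert (full_name, local_name, sharded_ckpt_dir) == (
    "pytorch_model_1.bin",
    "pytorch_model_1_rank3.bin",
    "pytorch_model_1",
)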
92
1
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a__ ( snake_case__ ): def __init__( self , _A , _A = None , _A = None , _A = None , _A = False , _A = False , _A = None , _A = None , **_A , ): """simple docstring""" super().__init__( _A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) __lowerCAmelCase = field __lowerCAmelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths} __lowerCAmelCase = Json( cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.streaming: __lowerCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) __lowerCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class a__ : def __init__( self , _A , _A , _A = None , _A = None , **_A , ): """simple docstring""" if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __lowerCAmelCase = dataset __lowerCAmelCase = path_or_buf __lowerCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __lowerCAmelCase = num_proc __lowerCAmelCase = "utf-8" __lowerCAmelCase = to_json_kwargs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.to_json_kwargs.pop("path_or_buf" , _A ) __lowerCAmelCase = self.to_json_kwargs.pop("orient" , "records" ) __lowerCAmelCase = self.to_json_kwargs.pop("lines" , True if orient == "records" else False ) __lowerCAmelCase = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True ) __lowerCAmelCase = self.to_json_kwargs.pop("compression" , _A ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , "wb" , compression=_A ) as buffer: __lowerCAmelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( f"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" " was passed. Please provide a local path instead." 
) __lowerCAmelCase = self._write( file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) return written def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = args __lowerCAmelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) __lowerCAmelCase = batch.to_pandas().to_json( path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A ) if not json_str.endswith("\n" ): json_str += "\n" return json_str.encode(self.encoding ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , **_A , ): """simple docstring""" __lowerCAmelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ): __lowerCAmelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_A ) else: __lowerCAmelCase , __lowerCAmelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ): written += file_obj.write(_A ) return written
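A hedged usage sketch: the writer above is what backs Dataset.to_json in datasets; assuming a local output path, something like this exercises the records/lines defaults (one JSON object per line).

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("out.jsonl")  # orient="records", lines=True by default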
92
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class a__ ( snake_case__ ): def __init__( self , *_A , _A=None , _A=None , **_A ): """simple docstring""" super().__init__(*_A , **_A ) __lowerCAmelCase = eval_examples __lowerCAmelCase = post_process_function def __SCREAMING_SNAKE_CASE( self , _A = None , _A=None , _A = None , _A = "eval" , **_A , ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) __lowerCAmelCase = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) __lowerCAmelCase = gen_kwargs __lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset __lowerCAmelCase = self.get_eval_dataloader(_A ) __lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __lowerCAmelCase = self.post_process_function(_A , _A , _A ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) else: __lowerCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_A ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A ) return metrics def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A = "test" , **_A ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = self.get_test_dataloader(_A ) # Temporarily disable metric computation, we will do it in the loop here. 
__lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output __lowerCAmelCase = self.post_process_function(_A , _A , _A , "predict" ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
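Both methods above wrap their evaluation loop in the same timing bookkeeping. A hedged sketch of what that reduces to, mirroring (not reproducing) transformers' speed_metrics helper:

import math
import time

def speed_stats(prefix, start_time, num_samples, num_steps):
    runtime = time.time() - start_time
    return {
        f"{prefix}_runtime": round(runtime, 4),
        f"{prefix}_samples_per_second": round(num_samples / runtime, 3),
        f"{prefix}_steps_per_second": round(num_steps / runtime, 3),
    }

start = time.time()
time.sleep(0.01)  # stand-in for the evaluation loop
print(speed_stats("eval", start, num_samples=100, num_steps=math.ceil(100 / 8)))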
92
1
import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel UpperCamelCase__ = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } UpperCamelCase__ = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str=False ): __lowerCAmelCase , __lowerCAmelCase = create_model( "HTSAT-tiny" , "roberta" , SCREAMING_SNAKE_CASE_ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=SCREAMING_SNAKE_CASE_ , fusion_type="aff_2d" if enable_fusion else None , ) return model, model_cfg def _a ( SCREAMING_SNAKE_CASE_ : Tuple ): __lowerCAmelCase = {} __lowerCAmelCase = R".*sequential.(\d+).*" __lowerCAmelCase = R".*_projection.(\d+).*" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __lowerCAmelCase = key.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # replace sequential layers with list __lowerCAmelCase = re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).group(1 ) __lowerCAmelCase = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(SCREAMING_SNAKE_CASE_ )//3}.linear.""" ) elif re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = int(re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
__lowerCAmelCase = 1 if projecton_layer == 0 else 2 __lowerCAmelCase = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value __lowerCAmelCase = value __lowerCAmelCase = mixed_qkv.size(0 ) // 3 __lowerCAmelCase = mixed_qkv[:qkv_dim] __lowerCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2] __lowerCAmelCase = mixed_qkv[qkv_dim * 2 :] __lowerCAmelCase = query_layer __lowerCAmelCase = key_layer __lowerCAmelCase = value_layer else: __lowerCAmelCase = value return model_state_dict def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any=False ): __lowerCAmelCase , __lowerCAmelCase = init_clap(SCREAMING_SNAKE_CASE_ , enable_fusion=SCREAMING_SNAKE_CASE_ ) clap_model.eval() __lowerCAmelCase = clap_model.state_dict() __lowerCAmelCase = rename_state_dict(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = ClapConfig() __lowerCAmelCase = enable_fusion __lowerCAmelCase = ClapModel(SCREAMING_SNAKE_CASE_ ) # ignore the spectrogram embedding layer model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) transformers_config.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") UpperCamelCase__ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
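A note on the conversion above: the test `if "audio" and "qkv" in key` evaluates as just `"qkv" in key`, because the non-empty string literal is always truthy. The fused-qkv split itself is straightforward; a self-contained sketch:

import torch

dim = 4
mixed_qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
qkv_dim = mixed_qkv.size(0) // 3  # equal query/key/value thirds along dim 0
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (dim, dim)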
92
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] ): __lowerCAmelCase = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() ) __lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] ) return params UpperCamelCase__ = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ): if metric == "rouge2": __lowerCAmelCase = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": __lowerCAmelCase = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": __lowerCAmelCase = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" " function." ) __lowerCAmelCase = ModelCheckpoint( dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): return EarlyStopping( monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , ) class a__ ( pl.Callback ): def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A=True ): """simple docstring""" logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) __lowerCAmelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results __lowerCAmelCase = Path(pl_module.hparams.output_dir ) if type_path == "test": __lowerCAmelCase = od / "test_results.txt" __lowerCAmelCase = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__lowerCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" __lowerCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , "a+" ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __lowerCAmelCase = metrics[key] if isinstance(_A , torch.Tensor ): __lowerCAmelCase = val.item() __lowerCAmelCase = f"""{key}: {val:.6f}\n""" writer.write(_A ) if not save_generations: return if "preds" in metrics: __lowerCAmelCase = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(_A ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" try: __lowerCAmelCase = pl_module.model.model.num_parameters() except AttributeError: __lowerCAmelCase = pl_module.model.num_parameters() __lowerCAmelCase = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , "test" ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
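The parameter counting logged on train start boils down to a product over trainable tensor shapes; a minimal runnable sketch:

import numpy as np
import torch

model = torch.nn.Linear(10, 2)
n_trainable = sum(np.prod(p.size()) for p in model.parameters() if p.requires_grad)
# 10 * 2 weights + 2 biases = 22 parameters, reported above in "million parameters"
print({"n_params": n_trainable, "mp": n_trainable / 1e6})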
92
1
from __future__ import annotations


def _a(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
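Usage sketch for the check above: the longest side must be strictly shorter than the sum of the remaining sides for the lengths to close into a polygon.

sides = sorted([3.0, 4.0, 5.0])
assert sides[-1] < sum(sides[:-1])  # 5 < 7: a valid triangle
degenerate = sorted([1.0, 1.0, 2.0])
assert not (degenerate[-1] < sum(degenerate[:-1]))  # 2 < 2 is False: collapses to a line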
92
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
92
1
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCamelCase__ = TypeVar("""T""") def _a ( SCREAMING_SNAKE_CASE_ : int ): return (position - 1) // 2 def _a ( SCREAMING_SNAKE_CASE_ : int ): return (2 * position) + 1 def _a ( SCREAMING_SNAKE_CASE_ : int ): return (2 * position) + 2 class a__ ( Generic[T] ): def __init__( self ): """simple docstring""" __lowerCAmelCase = [] __lowerCAmelCase = {} __lowerCAmelCase = 0 def __len__( self ): """simple docstring""" return self.elements def __repr__( self ): """simple docstring""" return str(self.heap ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.elements == 0 def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" self.heap.append((elem, weight) ) __lowerCAmelCase = self.elements self.elements += 1 self._bubble_up(_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __lowerCAmelCase , __lowerCAmelCase = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __lowerCAmelCase , __lowerCAmelCase = self.heap[0] self._bubble_down(_A ) return elem def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = self.position_map[elem] __lowerCAmelCase = (elem, weight) if position > 0: __lowerCAmelCase = get_parent_position(_A ) __lowerCAmelCase , __lowerCAmelCase = self.heap[parent_position] if parent_weight > weight: self._bubble_up(_A ) else: self._bubble_down(_A ) else: self._bubble_down(_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = self.position_map[elem] if curr_pos == 0: return None __lowerCAmelCase = get_parent_position(_A ) __lowerCAmelCase , __lowerCAmelCase = self.heap[curr_pos] __lowerCAmelCase , __lowerCAmelCase = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(_A , _A ) return self._bubble_up(_A ) return None def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = self.position_map[elem] __lowerCAmelCase , __lowerCAmelCase = self.heap[curr_pos] __lowerCAmelCase = get_child_left_position(_A ) __lowerCAmelCase = get_child_right_position(_A ) if child_left_position < self.elements and child_right_position < self.elements: __lowerCAmelCase , __lowerCAmelCase = self.heap[child_left_position] __lowerCAmelCase , __lowerCAmelCase = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(_A , _A ) return self._bubble_down(_A ) if child_left_position < self.elements: __lowerCAmelCase , __lowerCAmelCase = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(_A , _A ) return self._bubble_down(_A ) else: return None if child_right_position < self.elements: __lowerCAmelCase , __lowerCAmelCase = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(_A , _A ) return self._bubble_down(_A ) return None def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = self.heap[nodea_pos][0] __lowerCAmelCase = self.heap[nodea_pos][0] __lowerCAmelCase , __lowerCAmelCase = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __lowerCAmelCase = nodea_pos __lowerCAmelCase = nodea_pos class a__ ( Generic[T] ): def __init__( self ): """simple docstring""" __lowerCAmelCase = {} __lowerCAmelCase = 0 def __repr__( self ): """simple docstring""" return str(self.connections ) def __len__( self ): """simple docstring""" return 
self.nodes def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" if node not in self.connections: __lowerCAmelCase = {} self.nodes += 1 def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" self.add_node(_A ) self.add_node(_A ) __lowerCAmelCase = weight __lowerCAmelCase = weight def _a ( SCREAMING_SNAKE_CASE_ : GraphUndirectedWeighted[T] , ): __lowerCAmelCase = {node: maxsize for node in graph.connections} __lowerCAmelCase = {node: None for node in graph.connections} __lowerCAmelCase = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if priority_queue.is_empty(): return dist, parent # initialization __lowerCAmelCase = priority_queue.extract_min() __lowerCAmelCase = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __lowerCAmelCase = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(SCREAMING_SNAKE_CASE_ , dist[neighbour] ) __lowerCAmelCase = node # running prim's algorithm while not priority_queue.is_empty(): __lowerCAmelCase = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __lowerCAmelCase = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(SCREAMING_SNAKE_CASE_ , dist[neighbour] ) __lowerCAmelCase = node return dist, parent
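For contrast with the decrease-key queue above, Prim's algorithm can also be written with Python's heapq and lazy deletion, where stale heap entries are skipped on pop:

import heapq

def prim_mst_weight(graph, start):
    # graph: {node: {neighbour: weight}}; returns total weight of the MST
    visited, total = set(), 0
    heap = [(0, start)]
    while heap:
        weight, node = heapq.heappop(heap)
        if node in visited:
            continue  # stale entry left behind instead of a decrease-key
        visited.add(node)
        total += weight
        for nxt, w in graph[node].items():
            if nxt not in visited:
                heapq.heappush(heap, (w, nxt))
    return total

graph = {"a": {"b": 1, "c": 3}, "b": {"a": 1, "c": 2}, "c": {"a": 3, "b": 2}}
assert prim_mst_weight(graph, "a") == 3  # edges a-b (1) and b-c (2)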
92
from queue import PriorityQueue from typing import Any import numpy as np def _a ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : PriorityQueue , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue __lowerCAmelCase = cst_fwd.get(SCREAMING_SNAKE_CASE_ , np.inf ) __lowerCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) __lowerCAmelCase = new_cost_f __lowerCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: __lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict ): __lowerCAmelCase = -1 __lowerCAmelCase = set() __lowerCAmelCase = set() __lowerCAmelCase = {source: 0} __lowerCAmelCase = {destination: 0} __lowerCAmelCase = {source: None} __lowerCAmelCase = {destination: None} __lowerCAmelCase = PriorityQueue() __lowerCAmelCase = PriorityQueue() __lowerCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): __lowerCAmelCase , __lowerCAmelCase = queue_forward.get() visited_forward.add(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase = queue_backward.get() visited_backward.add(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = pass_and_relaxation( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) __lowerCAmelCase = pass_and_relaxation( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: __lowerCAmelCase = shortest_distance return shortest_path_distance UpperCamelCase__ = { """B""": [["""C""", 1]], """C""": [["""D""", 1]], """D""": [["""F""", 1]], """E""": [["""B""", 1], ["""G""", 2]], """F""": [], """G""": [["""F""", 1]], } UpperCamelCase__ = { """B""": [["""E""", 1]], """C""": [["""B""", 1]], """D""": [["""C""", 1]], """F""": [["""D""", 1], ["""G""", 1]], """E""": [[None, np.inf]], """G""": [["""E""", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
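A sanity-check sketch: a plain single-direction Dijkstra over the same adjacency shape ({node: [[neighbour, weight], ...]}) should agree with the bidirectional search.

import heapq

def dijkstra(graph, source, destination):
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, v = heapq.heappop(heap)
        if v == destination:
            return d
        if d > dist.get(v, float("inf")):
            continue  # stale entry
        for nxt, w in graph[v]:
            new_d = d + w
            if new_d < dist.get(nxt, float("inf")):
                dist[nxt] = new_d
                heapq.heappush(heap, (new_d, nxt))
    return -1

graph = {"E": [["B", 1], ["G", 2]], "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]], "G": [["F", 1]], "F": []}
assert dijkstra(graph, "E", "F") == 3  # E -> G -> F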
92
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCamelCase__ = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : int ): if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(SCREAMING_SNAKE_CASE_ ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class a__ ( snake_case__ ): _a : Tuple = ["""pixel_values"""] def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = None , _A = True , _A = 1 / 2_5_5 , _A = True , _A = None , _A = None , **_A , ): """simple docstring""" super().__init__(**_A ) __lowerCAmelCase = size if size is not None else {"shortest_edge": 2_2_4} __lowerCAmelCase = get_size_dict(_A , default_to_square=_A ) __lowerCAmelCase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} __lowerCAmelCase = get_size_dict(_A , param_name="crop_size" ) __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = do_center_crop __lowerCAmelCase = crop_size __lowerCAmelCase = resample __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ): """simple docstring""" __lowerCAmelCase = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" in size: __lowerCAmelCase = get_resize_output_image_size(_A , size["shortest_edge"] , default_to_square=_A ) elif "height" in size and "width" in size: __lowerCAmelCase = (size["height"], size["width"]) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = None , **_A , ): """simple docstring""" __lowerCAmelCase = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(_A , size=(size["height"], size["width"]) , data_format=_A , **_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = None , **_A , ): """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A = None , **_A , ): """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , ): """simple docstring""" if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. __lowerCAmelCase = to_numpy_array(_A ) if do_resize: __lowerCAmelCase = self.resize(image=_A , size=_A , resample=_A ) if do_center_crop: __lowerCAmelCase = self.center_crop(_A , size=_A ) if do_rescale: __lowerCAmelCase = self.rescale(image=_A , scale=_A ) if do_normalize: __lowerCAmelCase = self.normalize(image=_A , mean=_A , std=_A ) __lowerCAmelCase = to_channel_dimension_format(_A , _A ) return image def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ): """simple docstring""" __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = resample if resample is not None else self.resample __lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCAmelCase = image_mean if image_mean is not None else self.image_mean __lowerCAmelCase = image_std if image_std is not None else self.image_std __lowerCAmelCase = size if size is not None else self.size __lowerCAmelCase = get_size_dict(_A , default_to_square=_A ) __lowerCAmelCase = crop_size if crop_size is not None else self.crop_size __lowerCAmelCase = get_size_dict(_A , param_name="crop_size" ) if not valid_images(_A ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) __lowerCAmelCase = make_batched(_A ) __lowerCAmelCase = [ [ self._preprocess_image( image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , ) for img in video ] for video in videos ] __lowerCAmelCase = {"pixel_values": videos} return BatchFeature(data=_A , tensor_type=_A )
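The per-frame pipeline above is resize -> center crop -> rescale -> normalize. A self-contained numpy sketch of the last two steps (the 0.5 mean/std values match transformers' IMAGENET_STANDARD constants):

import numpy as np

frame = np.random.randint(0, 256, size=(3, 224, 224)).astype(np.float32)  # channels-first
rescaled = frame * (1 / 255)                       # do_rescale with rescale_factor = 1/255
mean = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)  # broadcast over H and W
std = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
normalized = (rescaled - mean) / std
assert normalized.min() >= -1.0 and normalized.max() <= 1.0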
92
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class a__ ( snake_case__ ): _a : Optional[int] = """decision_transformer""" _a : Optional[int] = ["""past_key_values"""] _a : Dict = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , _A=1_7 , _A=4 , _A=1_2_8 , _A=4_0_9_6 , _A=True , _A=1 , _A=1_0_2_4 , _A=3 , _A=1 , _A=None , _A="relu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=1E-5 , _A=0.02 , _A=True , _A=True , _A=5_0_2_5_6 , _A=5_0_2_5_6 , _A=False , _A=False , **_A , ): """simple docstring""" __lowerCAmelCase = state_dim __lowerCAmelCase = act_dim __lowerCAmelCase = hidden_size __lowerCAmelCase = max_ep_len __lowerCAmelCase = action_tanh __lowerCAmelCase = vocab_size __lowerCAmelCase = n_positions __lowerCAmelCase = n_layer __lowerCAmelCase = n_head __lowerCAmelCase = n_inner __lowerCAmelCase = activation_function __lowerCAmelCase = resid_pdrop __lowerCAmelCase = embd_pdrop __lowerCAmelCase = attn_pdrop __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = initializer_range __lowerCAmelCase = scale_attn_weights __lowerCAmelCase = use_cache __lowerCAmelCase = scale_attn_by_inverse_layer_idx __lowerCAmelCase = reorder_and_upcast_attn __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
92
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) UpperCamelCase__ = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ["""ViTFeatureExtractor"""] UpperCamelCase__ = ["""ViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """VIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTForImageClassification""", """ViTForMaskedImageModeling""", """ViTModel""", """ViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """TFViTForImageClassification""", """TFViTModel""", """TFViTPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """FlaxViTForImageClassification""", """FlaxViTModel""", """FlaxViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
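A minimal sketch of the lazy-import idea behind _LazyModule, using PEP 562 module-level __getattr__ (illustrative only; not the transformers implementation):

import importlib
import sys

_import_structure = {"json": ["dumps"]}  # public name -> module that really defines it

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# attribute access on the module object triggers the import only now
assert sys.modules[__name__].dumps({"a": 1}) == '{"a": 1}'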
92
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): _a : str = StableUnCLIPPipeline _a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS _a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS _a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _a : Optional[Any] = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = 3_2 __lowerCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) __lowerCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , ) torch.manual_seed(0 ) __lowerCAmelCase = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) __lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A ) __lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , ) torch.manual_seed(0 ) __lowerCAmelCase = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , ) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL() __lowerCAmelCase = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": 
prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __lowerCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=_A ) @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" ) __lowerCAmelCase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCAmelCase = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) __lowerCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 1_0**9
92
1
import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib UpperCamelCase__ = threading.Lock() UpperCamelCase__ = None UpperCamelCase__ = { """debug""": logging.DEBUG, """info""": logging.INFO, """warning""": logging.WARNING, """error""": logging.ERROR, """critical""": logging.CRITICAL, } UpperCamelCase__ = logging.WARNING UpperCamelCase__ = True def _a ( ): __lowerCAmelCase = os.getenv("TRANSFORMERS_VERBOSITY" , SCREAMING_SNAKE_CASE_ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """ F"""has to be one of: { ', '.join(log_levels.keys() ) }""" ) return _default_log_level def _a ( ): return __name__.split("." )[0] def _a ( ): return logging.getLogger(_get_library_name() ) def _a ( ): global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return __lowerCAmelCase = logging.StreamHandler() # Set sys.stderr as stream. __lowerCAmelCase = sys.stderr.flush # Apply our default configuration to the library root logger. __lowerCAmelCase = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) __lowerCAmelCase = False def _a ( ): global _default_handler with _lock: if not _default_handler: return __lowerCAmelCase = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) __lowerCAmelCase = None def _a ( ): return log_levels def _a ( SCREAMING_SNAKE_CASE_ : Optional[str] = None ): if name is None: __lowerCAmelCase = _get_library_name() _configure_library_root_logger() return logging.getLogger(SCREAMING_SNAKE_CASE_ ) def _a ( ): _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def _a ( SCREAMING_SNAKE_CASE_ : int ): _configure_library_root_logger() _get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE_ ) def _a ( ): return set_verbosity(SCREAMING_SNAKE_CASE_ ) def _a ( ): return set_verbosity(SCREAMING_SNAKE_CASE_ ) def _a ( ): return set_verbosity(SCREAMING_SNAKE_CASE_ ) def _a ( ): return set_verbosity(SCREAMING_SNAKE_CASE_ ) def _a ( ): _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def _a ( ): _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def _a ( SCREAMING_SNAKE_CASE_ : logging.Handler ): _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : logging.Handler ): _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(SCREAMING_SNAKE_CASE_ ) def _a ( ): _configure_library_root_logger() __lowerCAmelCase = False def _a ( ): _configure_library_root_logger() __lowerCAmelCase = True def _a ( ): __lowerCAmelCase = _get_library_root_logger().handlers for handler in handlers: __lowerCAmelCase = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" ) 
handler.setFormatter(SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(SCREAMING_SNAKE_CASE_ ) def _a ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ): __lowerCAmelCase = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , SCREAMING_SNAKE_CASE_ ) if no_advisory_warnings: return self.warning(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = warning_advice @functools.lru_cache(SCREAMING_SNAKE_CASE_ ) def _a ( self : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ): self.warning(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = warning_once class a__ : def __init__( self , *_A , **_A ): # pylint: disable=unused-argument """simple docstring""" __lowerCAmelCase = args[0] if args else None def __iter__( self ): """simple docstring""" return iter(self._iterator ) def __getattr__( self , _A ): """simple docstring""" def empty_fn(*_A , **_A ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): """simple docstring""" return self def __exit__( self , _A , _A , _A ): """simple docstring""" return class a__ : def __call__( self , *_A , **_A ): """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm(*_A , **_A ) else: return EmptyTqdm(*_A , **_A ) def __SCREAMING_SNAKE_CASE( self , *_A , **_A ): """simple docstring""" __lowerCAmelCase = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_A , **_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() UpperCamelCase__ = _tqdm_cls() def _a ( ): global _tqdm_active return bool(_tqdm_active ) def _a ( ): global _tqdm_active __lowerCAmelCase = True hf_hub_utils.enable_progress_bars() def _a ( ): global _tqdm_active __lowerCAmelCase = False hf_hub_utils.disable_progress_bars()
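The handler/formatter plumbing above follows the standard library-root-logger pattern: one shared handler and level on the package's root logger, which child loggers inherit. A minimal sketch with a hypothetical package name:

import logging
import sys

library_logger = logging.getLogger("mylib")  # "mylib" is a stand-in package name
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s"))
library_logger.addHandler(handler)
library_logger.setLevel(logging.WARNING)

logging.getLogger("mylib.submodule").warning("propagates up to the shared handler")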
92
from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCamelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
1
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = R""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class a__ ( snake_case__ ): @add_start_docstrings(_A ) def __call__( self , _A , _A , **_A ): """simple docstring""" raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class a__ ( snake_case__ ): def __init__( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = max_length __lowerCAmelCase = max_position_embeddings @add_start_docstrings(_A ) def __call__( self , _A , _A , **_A ): """simple docstring""" __lowerCAmelCase = input_ids.shape[-1] __lowerCAmelCase = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class a__ ( snake_case__ ): def __init__( self , _A , _A ): """simple docstring""" warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, _A , ) __lowerCAmelCase = start_length __lowerCAmelCase = max_new_tokens __lowerCAmelCase = start_length + max_new_tokens @add_start_docstrings(_A ) def __call__( self , _A , _A , **_A ): """simple docstring""" return input_ids.shape[-1] >= self.max_length class a__ ( snake_case__ ): def __init__( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = max_time __lowerCAmelCase = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(_A ) def __call__( self , _A , _A , **_A ): """simple docstring""" return time.time() - self.initial_timestamp > self.max_time class a__ ( snake_case__ ): @add_start_docstrings(_A ) def __call__( self , _A , _A , **_A ): """simple docstring""" return any(criteria(_A , _A ) for criteria in self ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for stopping_criterium in self: if isinstance(_A , _A ): return stopping_criterium.max_length elif isinstance(_A , _A ): return stopping_criterium.max_length return None def _a ( SCREAMING_SNAKE_CASE_ : StoppingCriteriaList , SCREAMING_SNAKE_CASE_ : int ): __lowerCAmelCase = stopping_criteria.max_length __lowerCAmelCase = deepcopy(SCREAMING_SNAKE_CASE_ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , SCREAMING_SNAKE_CASE_ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=SCREAMING_SNAKE_CASE_ ) ) return new_stopping_criteria
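A stopping criterion is just a callable over (input_ids, scores) returning a bool; a self-contained sketch of a custom one (illustrative, not a transformers class):

import torch

class StopOnToken:
    def __init__(self, stop_token_id):
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids, scores=None, **kwargs):
        # stop once every sequence in the batch ends with the chosen token
        return bool((input_ids[:, -1] == self.stop_token_id).all())

criterion = StopOnToken(stop_token_id=2)
assert criterion(torch.tensor([[5, 7, 2]]))
assert not criterion(torch.tensor([[5, 7, 9]]))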
92
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : Optional[Any] = DebertaVaTokenizer _a : Optional[Any] = DebertaVaTokenizerFast _a : List[str] = True _a : Optional[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "this is a test" __lowerCAmelCase = "this is a test" return input_text, output_text def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(_A ) , 3_0_0_0_1 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "This is a test" __lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] __lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) # fmt: off __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] __lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DebertaVaTokenizer(_A ) __lowerCAmelCase = tokenizer.encode("sequence builders" ) __lowerCAmelCase = tokenizer.encode("multi-sequence build" ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 
2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
92
1
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def _a(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # century years are leap only when divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
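Cross-check sketch: Python's own calendar agrees with the doomsday computation, including the century leap case that the year % 400 test handles:

import datetime

assert datetime.date(2000, 1, 1).strftime("%A") == "Saturday"   # century leap year
assert datetime.date(2020, 10, 24).strftime("%A") == "Saturday"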
92
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCamelCase__ = logging.get_logger(__name__) @dataclass class a__ ( snake_case__ ): _a : List[str] = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **_A ): """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __lowerCAmelCase = deprecated_arg[3:] __lowerCAmelCase = not kwargs.pop(_A ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) __lowerCAmelCase = kwargs.pop("tpu_name" , self.tpu_name ) __lowerCAmelCase = kwargs.pop("device_idx" , self.device_idx ) __lowerCAmelCase = kwargs.pop("eager_mode" , self.eager_mode ) __lowerCAmelCase = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**_A ) _a : str = field( default=snake_case__ , metadata={"""help""": """Name of TPU"""} , ) _a : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) _a : bool = field(default=snake_case__ , metadata={"""help""": """Benchmark models in eager model."""} ) _a : bool = field( default=snake_case__ , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) __lowerCAmelCase = None if self.tpu: try: if self.tpu_name: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __lowerCAmelCase = None return tpu @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.n_gpu > 0
92
1
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase__ = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] ): return (preds == labels).mean() @dataclass class a__ : _a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a__ : _a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) _a : str = field(metadata={"""help""": """Should contain the data files for the task."""} ) _a : int = field( default=1_2_8 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _a : bool = field( default=snake_case__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _a ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE_ ) # Set seed set_seed(training_args.seed ) try: __lowerCAmelCase = processors[data_args.task_name]() __lowerCAmelCase = processor.get_labels() __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) __lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , ) # Get datasets __lowerCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __lowerCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(SCREAMING_SNAKE_CASE_ : EvalPrediction ) -> Dict: __lowerCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , p.label_ids )} # Data collator __lowerCAmelCase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __lowerCAmelCase = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowerCAmelCase = {} if training_args.do_eval: logger.info("*** Evaluate ***" 
) __lowerCAmelCase = trainer.evaluate() __lowerCAmelCase = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_master(): with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) writer.write("%s = %s\n" % (key, value) ) results.update(SCREAMING_SNAKE_CASE_ ) return results def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
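A hedged example of how this trainer script is typically launched; the task name, checkpoint, and paths below are illustrative placeholders rather than values taken from this file.

# python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./swag_data \
#     --max_seq_length 128 \
#     --output_dir ./out \
#     --do_train --do_eval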
92
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") UpperCamelCase__ = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : int = CamembertTokenizer _a : Dict = CamembertTokenizerFast _a : Tuple = True _a : List[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = CamembertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>NOTUSED" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(_A ) , 1_0_0_4 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = CamembertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.test_rust_tokenizer: return __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.tokenize(_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. __lowerCAmelCase = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=_A , )
92
1
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
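A minimal usage sketch for the biquad factories above, assuming the IIRFilter imported at the top exposes a per-sample process() method (as in the audio_filters package this module pairs with).

# Attenuate content above 1 kHz in a 48 kHz stream, one sample at a time:
lowpass = make_lowpass(1000, 48000)
samples = [0.0, 0.25, 0.5, 0.25, 0.0]
filtered = [lowpass.process(sample) for sample in samples]
print(filtered)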
92
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ): if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ): return x return (x, x) @require_tf class a__ : def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model} __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = 
after_output[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = np.abs((a - b) ).max() self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_save_load(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_pretrained_model_and_inputs() __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = after_outputs[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def 
__SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFViTModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFViTModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFDeiTModel(_A , name="vision_model" ) __lowerCAmelCase = TFRobertaModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFDeiTModelTester(self ) __lowerCAmelCase = TFRobertaModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 
vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = clip_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class a__ ( unittest.TestCase ): @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_A ) __lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __lowerCAmelCase = processor( text=["una foto di un gatto", "una foto di un cane"] , images=_A , padding=_A , return_tensors="np" ) __lowerCAmelCase = model(**_A ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowerCAmelCase = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
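A minimal sketch of the pattern these tests exercise: any pretrained vision encoder and text encoder can be paired into one dual-encoder model. The checkpoint names reuse the tiny test models already referenced in this file.

model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
)
# The combined model then accepts input_ids / pixel_values / attention_mask
# exactly as in the checks above, returning text_embeds and image_embeds.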
92
1
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the Euler gamma function as the improper integral of x^(z-1) * e^(-x)."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
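A quick sanity check: for positive integers n the integral above reproduces the factorial identity Γ(n) = (n - 1)!.

from math import factorial, isclose

assert isclose(gamma(5), factorial(4), rel_tol=1e-6)  # Γ(5) = 4! = 24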
92
import json import os import torch from diffusers import UNetaDModel os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): if hor == 1_28: __lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") __lowerCAmelCase = (32, 1_28, 2_56) __lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D") elif hor == 32: __lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") __lowerCAmelCase = (32, 64, 1_28, 2_56) __lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") __lowerCAmelCase = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" ) __lowerCAmelCase = model.state_dict() __lowerCAmelCase = { "down_block_types": down_block_types, "block_out_channels": block_out_channels, "up_block_types": up_block_types, "layers_per_block": 1, "use_timestep_embedding": True, "out_block_type": "OutConv1DBlock", "norm_num_groups": 8, "downsample_each_block": False, "in_channels": 14, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "flip_sin_to_cos": False, "freq_shift": 1, "sample_size": 6_55_36, "mid_block_type": "MidResTemporalBlock1D", "act_fn": "mish", } __lowerCAmelCase = UNetaDModel(**SCREAMING_SNAKE_CASE_ ) print(F"""length of state dict: {len(state_dict.keys() )}""" ) print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) __lowerCAmelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ ) torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" ) with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = { "in_channels": 14, "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), "up_block_types": (), "out_block_type": "ValueFunction", "mid_block_type": "ValueFunctionMidBlock1D", "block_out_channels": (32, 64, 1_28, 2_56), "layers_per_block": 1, "downsample_each_block": True, "sample_size": 6_55_36, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "use_timestep_embedding": True, "flip_sin_to_cos": False, "freq_shift": 1, "norm_num_groups": 8, "act_fn": "mish", } __lowerCAmelCase = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" ) __lowerCAmelCase = model __lowerCAmelCase = UNetaDModel(**SCREAMING_SNAKE_CASE_ ) print(F"""length of state dict: {len(state_dict.keys() )}""" ) print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) __lowerCAmelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ ) torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" ) with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == 
"__main__": unet(32) # unet(128) value_function()
92
1
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-indexed line whose base**exponent pair is greatest."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
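Why comparing x * log10(a) works: for positive bases, a**x > b**y exactly when x*log10(a) > y*log10(b), so the astronomically large powers never need to be formed. The pair below is, to the best of my recollection, the worked example from the Project Euler 99 statement; the inequality itself holds either way.

from math import log10

assert 518061 * log10(632382) > 525806 * log10(519432)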
92
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ): monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): class a__ : def __init__( self , _A ): """simple docstring""" __lowerCAmelCase = metric_id class a__ : _a : Optional[int] = [MetricMock(snake_case__ ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ): if "tmp_path" in args: __lowerCAmelCase = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(SCREAMING_SNAKE_CASE_ , match="https://huggingface.co/docs/evaluate" ): func(*SCREAMING_SNAKE_CASE_ )
92
1
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge UpperCamelCase__ = [ """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the""" """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe""" """ depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""", """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal""" """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's""" """ founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the""" """ body.""", """Amnesty International releases its annual report on the death penalty. The report catalogs the use of""" """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the""" """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital""" """ punishment.""", ] UpperCamelCase__ = [ """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""" """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz""" """ had informed his Lufthansa training school of an episode of severe depression, airline says .""", """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .""" """ Israel and the United States opposed the move, which could open the door to war crimes investigations against""" """ Israelis .""", """Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to""" """ death . Organization claims that governments around the world are using the threat of terrorism to advance""" """ executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death""" """ sentences up by 28% .""", ] def _a ( ): __lowerCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ , rouge_keys=["rouge2", "rougeL"] ) assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ , rouge_keys=["rouge2"] ) assert ( pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean() ) def _a ( ): __lowerCAmelCase = "rougeLsum" __lowerCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ , rouge_keys=[k] )[k] __lowerCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ , rouge_keys=[k] )[k] assert score > score_no_sep def _a ( ): __lowerCAmelCase = ["rouge1", "rouge2", "rougeL"] __lowerCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ , rouge_keys=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ , rouge_keys=SCREAMING_SNAKE_CASE_ ) assert score_sep == score_no_sep def _a ( ): __lowerCAmelCase = [ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] __lowerCAmelCase = [ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ ) == calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = [ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] __lowerCAmelCase = [ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." ] __lowerCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rouge_keys=["rougeLsum"] , newline_sep=SCREAMING_SNAKE_CASE_ )["rougeLsum"] __lowerCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rouge_keys=["rougeLsum"] )["rougeLsum"] assert new_score > prev_score def _a ( ): __lowerCAmelCase = Path("examples/seq2seq/test_data/wmt_en_ro" ) __lowerCAmelCase = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) ) assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = calculate_rouge_path( data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ ) assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
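A minimal sketch of calling the helper these tests exercise; PRED and TGT stand for the two summary lists defined at the top of this file, and the return value is assumed (as in the examples' utils.py) to map each requested ROUGE key to an aggregated score.

scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge1", "rouge2", "rougeL"])
print(scores["rouge2"])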
92
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
92
1
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ... up to the nth term as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
92
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available UpperCamelCase__ = { """configuration_audio_spectrogram_transformer""": [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ASTConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ASTForAudioClassification""", """ASTModel""", """ASTPreTrainedModel""", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ["""ASTFeatureExtractor"""] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ = { """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""], """tokenization_canine""": ["""CanineTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""", """CanineForMultipleChoice""", """CanineForQuestionAnswering""", """CanineForSequenceClassification""", """CanineForTokenClassification""", """CanineLayer""", """CanineModel""", """CaninePreTrainedModel""", """load_tf_weights_in_canine""", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
import argparse import os import re import packaging.version UpperCamelCase__ = """examples/""" UpperCamelCase__ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCamelCase__ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCamelCase__ = """README.md""" def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern] __lowerCAmelCase = replace.replace("VERSION" , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = re_pattern.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , pattern="examples" ) def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not patch: update_version_in_examples(SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = "🤗 Transformers currently provides the following architectures" __lowerCAmelCase = "1. Want to contribute a new model?" with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.readlines() # Find the start of the list. __lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): __lowerCAmelCase = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , ) index += 1 with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(SCREAMING_SNAKE_CASE_ ) def _a ( ): with open(REPLACE_FILES["init"] , "r" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE_ ).groups()[0] return packaging.version.parse(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any]=False ): __lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" 
) if default_version.is_devrelease: __lowerCAmelCase = default_version.base_version elif patch: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __lowerCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = default_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ , patch=SCREAMING_SNAKE_CASE_ ) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() def _a ( ): __lowerCAmelCase = get_version() __lowerCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __lowerCAmelCase = current_version.base_version # Check with the user we got that right. __lowerCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = dev_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ ) print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCamelCase__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
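Typical invocations of this release helper, inferred from the argparse flags defined just above (no other flags are assumed):

# python utils/release.py                  # prepare a minor release
# python utils/release.py --patch          # prepare a patch release
# python utils/release.py --post_release   # switch back to a dev version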
92
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase__ = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( 
FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
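# A hedged illustration of the lazy pattern wired up above: importing the package is
# cheap, and the submodule is only materialized when an attribute is first resolved.
from transformers.models import albert

config_cls = albert.AlbertConfig       # _LazyModule imports configuration_albert here
print(config_cls.model_type)           # -> "albert"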
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a__ ( snake_case__ , unittest.TestCase ): _a : Dict = KandinskyImgaImgPipeline _a : List[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] _a : str = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] _a : List[Any] = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _a : int = False @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim * 4 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 1_0_0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , ) __lowerCAmelCase = MultilingualCLIP(_A ) __lowerCAmelCase = text_encoder.eval() return text_encoder @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**_A ) return model @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } 
@property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1_0_0_0, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**_A ) __lowerCAmelCase = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A ) __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __lowerCAmelCase = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 1_0, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**_A ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowerCAmelCase = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = "A red cartoon frog, 4k" __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(_A ) __lowerCAmelCase = 
KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( _A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_A , _A )
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): __lowerCAmelCase = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sgugger/tiny-distilbert-classification" __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , only_pretrain_model=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , torchscript=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , fpaa=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" __lowerCAmelCase = AutoConfig.from_pretrained(_A ) # set architectures equal to `None` __lowerCAmelCase = None __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A , configs=[config] ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A ) __lowerCAmelCase = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_A , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" __lowerCAmelCase = AutoConfig.from_pretrained(_A ) __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A , configs=[config] ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tinier_bart" __lowerCAmelCase = AutoConfig.from_pretrained(_A ) __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A , configs=[config] ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" __lowerCAmelCase = AutoConfig.from_pretrained(_A ) __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A , configs=[config] ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tinier_bart" __lowerCAmelCase = AutoConfig.from_pretrained(_A ) __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A , configs=[config] ) __lowerCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , save_to_csv=_A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_A , "inf_time.csv" ) , train_memory_csv_file=os.path.join(_A , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(_A , "inf_mem.csv" ) , train_time_csv_file=os.path.join(_A , "train_time.csv" ) , env_info_csv_file=os.path.join(_A , "env.csv" ) , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A ) benchmark.run() self.assertTrue(Path(os.path.join(_A , "inf_time.csv" ) ).exists() ) 
self.assertTrue(Path(os.path.join(_A , "train_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_A , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_A , "train_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_A , "env.csv" ) ).exists() ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(_A ): self.assertTrue(hasattr(_A , "sequential" ) ) self.assertTrue(hasattr(_A , "cumulative" ) ) self.assertTrue(hasattr(_A , "current" ) ) self.assertTrue(hasattr(_A , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: __lowerCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_A , "log.txt" ) , log_print=_A , trace_memory_line_by_line=_A , multi_process=_A , ) __lowerCAmelCase = PyTorchBenchmark(_A ) __lowerCAmelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_A , "log.txt" ) ).exists() )
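# A minimal standalone sketch of the benchmark API exercised in the tests above; the
# tiny model id is illustrative and downloading it requires network access.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)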
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 1_0_0:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 1_0_0:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 1_00)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 1_28)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(1_00)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(1_28)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
import shutil  # used by the shard-moving logic in _prepare_split_single below
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCamelCase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class a__ ( datasets.BuilderConfig ): _a : Optional[datasets.Features] = None def _a ( SCREAMING_SNAKE_CASE_ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE_ : List[int] , ): import pyspark def generate_fn(): __lowerCAmelCase = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) ) for partition_id in partition_order: __lowerCAmelCase = df_with_partition_id.select("*" ).where(F"""part_id = {partition_id}""" ).drop("part_id" ) __lowerCAmelCase = partition_df.collect() __lowerCAmelCase = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class a__ ( _BaseExamplesIterable ): def __init__( self , _A , _A=None , ): """simple docstring""" __lowerCAmelCase = df __lowerCAmelCase = partition_order or range(self.df.rdd.getNumPartitions() ) __lowerCAmelCase = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): """simple docstring""" yield from self.generate_examples_fn() def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_A ) return SparkExamplesIterable(self.df , partition_order=_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = self.split_shard_indices_by_worker(_A , _A ) return SparkExamplesIterable(self.df , partition_order=_A ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return len(self.partition_order ) class a__ ( datasets.DatasetBuilder ): _a : Optional[Any] = SparkConfig def __init__( self , _A , _A = None , _A = None , **_A , ): """simple docstring""" import pyspark __lowerCAmelCase = pyspark.sql.SparkSession.builder.getOrCreate() __lowerCAmelCase = df __lowerCAmelCase = working_dir super().__init__( cache_dir=_A , config_name=str(self.df.semanticHash() ) , **_A , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" def create_cache_and_write_probe(_A ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=_A ) __lowerCAmelCase = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_A , "a" ) return [probe_file] if self._spark.conf.get("spark.master" , "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: __lowerCAmelCase = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_A ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" import pyspark def get_arrow_batch_size(_A ): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} ) __lowerCAmelCase = self.df.count() __lowerCAmelCase = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __lowerCAmelCase = ( self.df.limit(_A ) .repartition(1 ) .mapInArrow(_A , "batch_bytes: long" ) .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) ) .collect()[0] .sample_bytes / sample_num_rows ) __lowerCAmelCase = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __lowerCAmelCase = min(_A , int(approx_total_size / max_shard_size ) ) __lowerCAmelCase = self.df.repartition(_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , ): """simple docstring""" import pyspark __lowerCAmelCase = ParquetWriter if file_format == "parquet" else ArrowWriter __lowerCAmelCase = os.path.join(self._working_dir , os.path.basename(_A ) ) if self._working_dir else fpath __lowerCAmelCase = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. __lowerCAmelCase = self.config.features __lowerCAmelCase = self._writer_batch_size __lowerCAmelCase = self._fs.storage_options def write_arrow(_A ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __lowerCAmelCase = pyspark.TaskContext().taskAttemptId() __lowerCAmelCase = next(_A , _A ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , ) __lowerCAmelCase = 0 __lowerCAmelCase = writer_class( features=_A , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , ) __lowerCAmelCase = pa.Table.from_batches([first_batch] ) writer.write_table(_A ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __lowerCAmelCase , __lowerCAmelCase = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) shard_id += 1 __lowerCAmelCase = writer_class( features=writer._features , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , ) __lowerCAmelCase = pa.Table.from_batches([batch] ) writer.write_table(_A ) if writer._num_bytes > 0: __lowerCAmelCase , __lowerCAmelCase = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_A ) ): __lowerCAmelCase = os.path.join(os.path.dirname(_A ) , os.path.basename(_A ) ) shutil.move(_A , _A ) __lowerCAmelCase = ( self.df.mapInArrow(_A , "task_id: long, num_examples: long, num_bytes: long" ) .groupBy("task_id" ) .agg( pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __SCREAMING_SNAKE_CASE( self , _A , _A = "arrow" , _A = None , _A = None , **_A , ): """simple docstring""" self._validate_cache_dir() __lowerCAmelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_A ) __lowerCAmelCase = not is_remote_filesystem(self._fs ) __lowerCAmelCase = os.path.join if is_local else posixpath.join __lowerCAmelCase = "-TTTTT-SSSSS-of-NNNNN" __lowerCAmelCase = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" __lowerCAmelCase = path_join(self._output_dir , _A ) __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = [] __lowerCAmelCase = [] for task_id, content in self._prepare_split_single(_A , _A , _A ): ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_A ) __lowerCAmelCase = total_num_examples __lowerCAmelCase = total_num_bytes # should rename everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: __lowerCAmelCase = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
__lowerCAmelCase = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( _A , _A , _A , ): rename( _A , fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , f"""{global_shard_id:05d}""" ).replace("NNNNN" , f"""{total_shards:05d}""" ) , ) __lowerCAmelCase = [] __lowerCAmelCase = 0 for i in range(len(_A ) ): __lowerCAmelCase , __lowerCAmelCase = task_id_and_num_shards[i] for shard_id in range(_A ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_A , len(_A ) ).map(lambda _A : _rename_shard(*_A ) ).collect() else: # don't use any pattern __lowerCAmelCase = 0 __lowerCAmelCase = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace(_A , "" ) , ) def __SCREAMING_SNAKE_CASE( self , _A , ): """simple docstring""" return SparkExamplesIterable(self.df )
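# A hedged sketch of how this builder is normally reached from user code; the
# DataFrame below is a toy and a local SparkSession is assumed.
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], schema="text: string")
ds = Dataset.from_spark(df)  # drives the Spark builder defined above
print(ds[0])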
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a__ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = scope __lowerCAmelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = num_patches + 2 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTModel(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTForMaskedImageModeling(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, 
self.image_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForMaskedImageModeling(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = self.type_sequence_label_size __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a__ ( snake_case__ , snake_case__ , unittest.TestCase ): _a : Optional[Any] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _a : int = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _a : Optional[Any] = False _a : Tuple = False _a : Tuple = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def __SCREAMING_SNAKE_CASE( 
self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ): """simple docstring""" __lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.model_tester.is_training: return __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_A ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __lowerCAmelCase = False __lowerCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue __lowerCAmelCase = model_class(_A ) model.gradient_checkpointing_enable() model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_A ), *get_values(_A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): __lowerCAmelCase = problem_type["title"] __lowerCAmelCase = problem_type["num_labels"] __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) if problem_type["num_labels"] > 1: __lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) __lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_A ) as warning_list: __lowerCAmelCase = model(**_A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = DeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _a ( ): __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( _A ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ).to(_A ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**_A ) # verify the logits __lowerCAmelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) __lowerCAmelCase = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ) __lowerCAmelCase = inputs.pixel_values.to(_A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __lowerCAmelCase = model(_A )
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
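# A hedged toy run of clean_model_doc_toc above; the entries are invented.
toy_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
]
# duplicates sharing one title collapse to a single entry, then everything is
# sorted by lowercase title -> [ALBERT, BERT]
print(clean_model_doc_toc(toy_doc))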
def solution(limit: int = 1_00_00_00) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
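# A hedged sanity check of the sieve above against a brute-force totient; the helper
# below exists only for this check.
from math import gcd

def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

limit = 20
assert solution(limit) == sum(phi_naive(i) for i in range(2, limit + 1))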
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase__ = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase__ = """======= >>>>>>> """ UpperCamelCase__ = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase__ = [ # (pattern, replacement) # Order is important here for some replacements (R"""tfds\.core""", R"""datasets"""), (R"""tf\.io\.gfile\.GFile""", R"""open"""), (R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""), (R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""), (R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""), (R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""), (R"""tfds\.features\.FeaturesDict\(""", R"""dict("""), (R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (R"""tfds\.""", R"""datasets."""), (R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""), (R"""self\.builder_config""", R"""self.config"""), ] def _a ( SCREAMING_SNAKE_CASE_ : Namespace ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class a__ ( snake_case__ ): @staticmethod def __SCREAMING_SNAKE_CASE( _A ): """simple docstring""" __lowerCAmelCase = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=_A , required=_A , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=_A , required=_A , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=_A ) def __init__( self , _A , _A , *_A ): """simple docstring""" __lowerCAmelCase = get_logger("datasets-cli/converting" ) __lowerCAmelCase = tfds_path __lowerCAmelCase = datasets_directory def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if os.path.isdir(self._tfds_path ): __lowerCAmelCase = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __lowerCAmelCase = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) __lowerCAmelCase = os.path.abspath(self._datasets_directory ) self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = {} if os.path.isdir(self._tfds_path ): __lowerCAmelCase = os.listdir(_A ) else: __lowerCAmelCase = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f"""Looking at file {f_name}""" ) __lowerCAmelCase = os.path.join(_A , _A ) __lowerCAmelCase = os.path.join(_A , _A ) if not os.path.isfile(_A ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(_A , encoding="utf-8" ) as f: __lowerCAmelCase = f.readlines() __lowerCAmelCase = [] __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = [] for line in lines: __lowerCAmelCase = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __lowerCAmelCase = "import datasets\n" elif "import tensorflow" in out_line: # order is important here __lowerCAmelCase = "" continue elif "from absl import logging" in out_line: __lowerCAmelCase = "from datasets import logging\n" elif "getLogger" in out_line: __lowerCAmelCase = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __lowerCAmelCase = True __lowerCAmelCase = list(filter(lambda _A : e in out_line , _A ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_A ) + "\n" ) out_lines.append(_A ) out_lines.append(_A ) continue else: for pattern, replacement in TO_CONVERT: __lowerCAmelCase = re.sub(_A , _A , _A ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __lowerCAmelCase = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , _A ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) __lowerCAmelCase = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __lowerCAmelCase = True out_lines.append(_A ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __lowerCAmelCase = f_name.replace(".py" , "" ) __lowerCAmelCase = os.path.join(_A , _A ) __lowerCAmelCase = os.path.join(_A , _A ) os.makedirs(_A , exist_ok=_A ) self._logger.info(f"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_A ) if needs_manual_update: with_manual_update.append(_A ) with open(_A , "w" , encoding="utf-8" ) as f: f.writelines(_A ) self._logger.info(f"""Converted in {output_file}""" ) for utils_file in utils_files: try: __lowerCAmelCase = os.path.basename(_A ) __lowerCAmelCase = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(_A , _A ) except KeyError: self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType UpperCamelCase__ = get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if accelerator.process_index == 0: logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {ckpt_dir}""" ) __lowerCAmelCase = {"model": state_dict} dist_cp.save_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Model saved to {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = 
torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) if F"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading model from {ckpt_dir}""" ) __lowerCAmelCase = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , ) __lowerCAmelCase = state_dict["model"] logger.info(F"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state saved in {output_optimizer_file}""" ) else: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Optimizer state saved in {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = 
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" ) else: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if F"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading Optimizer from {ckpt_dir}""" ) __lowerCAmelCase = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , ) __lowerCAmelCase = optim_state["optimizer"] logger.info(F"""Optimizer loaded from {ckpt_dir}""" ) __lowerCAmelCase = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
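# Hedged usage sketch for the helpers above (upstream Accelerate exposes them
# as save_fsdp_model / load_fsdp_model / save_fsdp_optimizer /
# load_fsdp_optimizer); assumes a multi-process run started with
# `accelerate launch` and that the state-dict type comes from config/env:
import torch.nn as nn

from accelerate import Accelerator, FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin()
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
model = accelerator.prepare(nn.Linear(8, 8))  # toy model, wrapped in FSDP
# save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)
# load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)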
92
1
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}

compute_ap(data)
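# Quick sanity check for the driver above, as a sketch: in this graph, removing
# vertex 2 separates {0, 1} from the rest, removing 3 isolates 4, and removing
# 5 cuts off the 6-7-8 cycle, so 2, 3 and 5 should be printed.
import io
from contextlib import redirect_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    compute_ap(data)
assert buf.getvalue().split() == ["2", "3", "5"]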
92
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class a__ ( snake_case__ ): def __init__( self , *_A , _A=None , _A=None , **_A ): """simple docstring""" super().__init__(*_A , **_A ) __lowerCAmelCase = eval_examples __lowerCAmelCase = post_process_function def __SCREAMING_SNAKE_CASE( self , _A = None , _A=None , _A = None , _A = "eval" , **_A , ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) __lowerCAmelCase = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) __lowerCAmelCase = gen_kwargs __lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset __lowerCAmelCase = self.get_eval_dataloader(_A ) __lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __lowerCAmelCase = self.post_process_function(_A , _A , _A ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) else: __lowerCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_A ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A ) return metrics def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A = "test" , **_A ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = self.get_test_dataloader(_A ) # Temporarily disable metric computation, we will do it in the loop here. 
__lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output __lowerCAmelCase = self.post_process_function(_A , _A , _A , "predict" ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
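# Hedged wiring sketch for the trainer subclass above (upstream it is a
# question-answering Seq2Seq trainer; `model`, `args`, `tokenizer`, the
# datasets and `compute_metrics` are assumed to exist already, and the
# post-processing hook must match the three- and four-argument call sites
# in evaluate() and predict()):
from transformers.trainer_utils import EvalPrediction

def post_processing_function(examples, features, outputs, stage="eval"):
    # decode generated ids to text; pairing with references is task-specific
    preds = tokenizer.batch_decode(outputs.predictions, skip_special_tokens=True)
    return EvalPrediction(predictions=preds, label_ids=examples["answers"])

trainer = QuestionAnsweringSeq2SeqTrainer(  # hypothetical name for the class above
    model=model,
    args=args,
    eval_dataset=eval_dataset,
    eval_examples=eval_examples,
    post_process_function=post_processing_function,
    compute_metrics=compute_metrics,
)
metrics = trainer.evaluate(max_length=64, num_beams=4)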
92
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""", """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""", """microsoft/deberta-v2-xlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json""" ), """microsoft/deberta-v2-xxlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json""" ), } class a__ ( snake_case__ ): _a : int = """deberta-v2""" def __init__( self , _A=1_2_8_1_0_0 , _A=1_5_3_6 , _A=2_4 , _A=2_4 , _A=6_1_4_4 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=0 , _A=0.02 , _A=1E-7 , _A=False , _A=-1 , _A=0 , _A=True , _A=None , _A=0 , _A="gelu" , **_A , ): """simple docstring""" super().__init__(**_A ) __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = relative_attention __lowerCAmelCase = max_relative_positions __lowerCAmelCase = pad_token_id __lowerCAmelCase = position_biased_input # Backwards compatibility if type(_A ) == str: __lowerCAmelCase = [x.strip() for x in pos_att_type.lower().split("|" )] __lowerCAmelCase = pos_att_type __lowerCAmelCase = vocab_size __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = kwargs.get("pooler_hidden_size" , _A ) __lowerCAmelCase = pooler_dropout __lowerCAmelCase = pooler_hidden_act class a__ ( snake_case__ ): @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.task == "multiple-choice": __lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: __lowerCAmelCase = {0: "batch", 1: "sequence"} if self._config.type_vocab_size > 0: return OrderedDict( [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] ) else: return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 1_2 def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = -1 , _A = False , _A = None , _A = 3 , _A = 4_0 , _A = 4_0 , _A = None , ): """simple docstring""" __lowerCAmelCase = super().generate_dummy_inputs(preprocessor=_A , framework=_A ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
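# Small sketch using the upstream name this config mirrors (DebertaV2Config in
# transformers); it shows the string-to-list backwards compatibility handled
# in __init__ above:
from transformers import DebertaV2Config

config = DebertaV2Config(pos_att_type="p2c|c2p")
print(config.pos_att_type)  # -> ["p2c", "c2p"]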
92
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] ): __lowerCAmelCase = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() ) __lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] ) return params UpperCamelCase__ = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ): if metric == "rouge2": __lowerCAmelCase = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": __lowerCAmelCase = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": __lowerCAmelCase = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" " function." ) __lowerCAmelCase = ModelCheckpoint( dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): return EarlyStopping( monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , ) class a__ ( pl.Callback ): def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A=True ): """simple docstring""" logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) __lowerCAmelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results __lowerCAmelCase = Path(pl_module.hparams.output_dir ) if type_path == "test": __lowerCAmelCase = od / "test_results.txt" __lowerCAmelCase = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__lowerCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" __lowerCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , "a+" ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __lowerCAmelCase = metrics[key] if isinstance(_A , torch.Tensor ): __lowerCAmelCase = val.item() __lowerCAmelCase = f"""{key}: {val:.6f}\n""" writer.write(_A ) if not save_generations: return if "preds" in metrics: __lowerCAmelCase = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(_A ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" try: __lowerCAmelCase = pl_module.model.model.num_parameters() except AttributeError: __lowerCAmelCase = pl_module.model.num_parameters() __lowerCAmelCase = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , "test" ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
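# Hedged sketch of plugging these pieces into a Lightning trainer (the
# upstream names Seq2SeqLoggingCallback, get_checkpoint_callback and
# get_early_stopping_callback are assumed for the class and the two factory
# functions defined above):
import pytorch_lightning as pl

checkpoint = get_checkpoint_callback("outputs", metric="rouge2")
early_stop = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stop])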
92
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] __lowerCAmelCase = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]] __lowerCAmelCase = DisjunctiveConstraint(_A ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 ) __lowerCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __lowerCAmelCase = DisjunctiveConstraint(_A ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
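# The same constraint API outside the test-suite, as a short sketch: update()
# advances the trie one token at a time and reports when a branch completes.
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
assert completed and dc.current_seq == [1, 2, 4]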
92
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
92
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a__ ( snake_case__ ): _a : Union[str, Any] = """ClapFeatureExtractor""" _a : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , _A , _A ): """simple docstring""" super().__init__(_A , _A ) def __call__( self , _A=None , _A=None , _A=None , **_A ): """simple docstring""" __lowerCAmelCase = kwargs.pop("sampling_rate" , _A ) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none." ) if text is not None: __lowerCAmelCase = self.tokenizer(_A , return_tensors=_A , **_A ) if audios is not None: __lowerCAmelCase = self.feature_extractor( _A , sampling_rate=_A , return_tensors=_A , **_A ) if text is not None and audios is not None: __lowerCAmelCase = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A ) , tensor_type=_A ) def __SCREAMING_SNAKE_CASE( self , *_A , **_A ): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A ) def __SCREAMING_SNAKE_CASE( self , *_A , **_A ): """simple docstring""" return self.tokenizer.decode(*_A , **_A ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.tokenizer.model_input_names __lowerCAmelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
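# Hedged usage sketch for the processor above (the checkpoint name and the
# 48 kHz sampling rate are assumptions based on the public CLAP checkpoints):
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence
inputs = processor(text=["a dog barking"], audios=audio,
                   sampling_rate=48_000, return_tensors="pt")
# `inputs` now carries the tokenizer fields plus `input_features`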
92
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
            shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
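# Worked example on the graphs above: E -> B -> C -> D -> F costs 4, while
# E -> G -> F costs 3, so the bidirectional search should settle on 3.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3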
92
1
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
92
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class a__ ( snake_case__ ): _a : Optional[int] = """decision_transformer""" _a : Optional[int] = ["""past_key_values"""] _a : Dict = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , _A=1_7 , _A=4 , _A=1_2_8 , _A=4_0_9_6 , _A=True , _A=1 , _A=1_0_2_4 , _A=3 , _A=1 , _A=None , _A="relu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=1E-5 , _A=0.02 , _A=True , _A=True , _A=5_0_2_5_6 , _A=5_0_2_5_6 , _A=False , _A=False , **_A , ): """simple docstring""" __lowerCAmelCase = state_dim __lowerCAmelCase = act_dim __lowerCAmelCase = hidden_size __lowerCAmelCase = max_ep_len __lowerCAmelCase = action_tanh __lowerCAmelCase = vocab_size __lowerCAmelCase = n_positions __lowerCAmelCase = n_layer __lowerCAmelCase = n_head __lowerCAmelCase = n_inner __lowerCAmelCase = activation_function __lowerCAmelCase = resid_pdrop __lowerCAmelCase = embd_pdrop __lowerCAmelCase = attn_pdrop __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = initializer_range __lowerCAmelCase = scale_attn_weights __lowerCAmelCase = use_cache __lowerCAmelCase = scale_attn_by_inverse_layer_idx __lowerCAmelCase = reorder_and_upcast_attn __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
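# Sketch with the upstream names this mirrors (DecisionTransformerConfig /
# DecisionTransformerModel in transformers); the dimensions are arbitrary:
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6, max_ep_len=1000)
model = DecisionTransformerModel(config)  # randomly initialised backbone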
92
1
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING UpperCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(snake_case__ ) class a__ ( snake_case__ ): def __init__( self , **_A ): """simple docstring""" super().__init__(**_A ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , "vision" ) self.check_model_type(_A ) def __call__( self , _A , _A = None , **_A , ): """simple docstring""" if "text_queries" in kwargs: __lowerCAmelCase = kwargs.pop("text_queries" ) if isinstance(_A , (str, Image.Image) ): __lowerCAmelCase = {"image": image, "candidate_labels": candidate_labels} else: __lowerCAmelCase = image __lowerCAmelCase = super().__call__(_A , **_A ) return results def __SCREAMING_SNAKE_CASE( self , **_A ): """simple docstring""" __lowerCAmelCase = {} if "threshold" in kwargs: __lowerCAmelCase = kwargs["threshold"] if "top_k" in kwargs: __lowerCAmelCase = kwargs["top_k"] return {}, {}, postprocess_params def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = load_image(inputs["image"] ) __lowerCAmelCase = inputs["candidate_labels"] if isinstance(_A , _A ): __lowerCAmelCase = candidate_labels.split("," ) __lowerCAmelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(_A ): __lowerCAmelCase = self.tokenizer(_A , return_tensors=self.framework ) __lowerCAmelCase = self.image_processor(_A , return_tensors=self.framework ) yield { "is_last": i == len(_A ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = model_inputs.pop("target_size" ) __lowerCAmelCase = model_inputs.pop("candidate_label" ) __lowerCAmelCase = model_inputs.pop("is_last" ) __lowerCAmelCase = self.model(**_A ) __lowerCAmelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def __SCREAMING_SNAKE_CASE( self , _A , _A=0.1 , _A=None ): """simple docstring""" __lowerCAmelCase = [] for model_output in model_outputs: __lowerCAmelCase = model_output["candidate_label"] __lowerCAmelCase = BaseModelOutput(_A ) __lowerCAmelCase = self.image_processor.post_process_object_detection( outputs=_A , threshold=_A , target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): __lowerCAmelCase = outputs["scores"][index].item() __lowerCAmelCase = self._get_bounding_box(outputs["boxes"][index][0] ) __lowerCAmelCase = {"score": score, "label": label, "box": box} results.append(_A ) __lowerCAmelCase = sorted(_A , key=lambda _A : x["score"] , reverse=_A ) if top_k: __lowerCAmelCase = results[:top_k] return results def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." 
) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = box.int().tolist() __lowerCAmelCase = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
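# Hedged end-to-end sketch through the pipeline factory (the checkpoint name
# is an assumption; any zero-shot object-detection checkpoint works):
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
preds = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# -> list of {"score": ..., "label": ..., "box": {"xmin": ..., ...}} dicts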
92
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): _a : str = StableUnCLIPPipeline _a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS _a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS _a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _a : Optional[Any] = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = 3_2 __lowerCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) __lowerCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , ) torch.manual_seed(0 ) __lowerCAmelCase = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) __lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A ) __lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , ) torch.manual_seed(0 ) __lowerCAmelCase = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , ) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL() __lowerCAmelCase = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": 
prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __lowerCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=_A ) @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" ) __lowerCAmelCase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCAmelCase = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) __lowerCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 1_0**9
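# Minimal inference sketch mirroring the slow test above (the checkpoint name
# is taken from that test; fp16 plus CPU offload keeps memory manageable):
import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
)
pipe.enable_sequential_cpu_offload()
image = pipe("anime turtle").images[0]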
92
1
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
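# Rough hold-out accuracy for the classifier above (a sketch; k stays at the
# default of 5):
correct = sum(
    classifier(X_train, y_train, classes, point) == classes[label]
    for point, label in zip(X_test, y_test)
)
print(f"accuracy: {correct / len(y_test):.2f}")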
92
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
1
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class a__ ( snake_case__ ): _a : int = (DEISMultistepScheduler,) _a : List[Any] = (("""num_inference_steps""", 2_5),) def __SCREAMING_SNAKE_CASE( self , **_A ): """simple docstring""" __lowerCAmelCase = { "num_train_timesteps": 1_0_0_0, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, } config.update(**_A ) return config def __SCREAMING_SNAKE_CASE( self , _A=0 , **_A ): """simple docstring""" __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop("num_inference_steps" , _A ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config(**_A ) __lowerCAmelCase = scheduler_class(**_A ) scheduler.set_timesteps(_A ) # copy over dummy past residuals __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_A ) __lowerCAmelCase = scheduler_class.from_pretrained(_A ) new_scheduler.set_timesteps(_A ) # copy over dummy past residuals __lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase , __lowerCAmelCase = sample, sample for t in range(_A , time_step + scheduler.config.solver_order + 1 ): __lowerCAmelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample __lowerCAmelCase = new_scheduler.step(_A , _A , _A , **_A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self , _A=0 , **_A ): """simple docstring""" __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop("num_inference_steps" , _A ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_A ) scheduler.set_timesteps(_A ) # copy over dummy past residuals (must be after setting timesteps) __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_A ) __lowerCAmelCase = scheduler_class.from_pretrained(_A ) # copy over dummy past residuals new_scheduler.set_timesteps(_A ) # copy over dummy past residual (must be after setting timesteps) __lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample __lowerCAmelCase = new_scheduler.step(_A , _A , _A , **_A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __SCREAMING_SNAKE_CASE( self , _A=None , **_A ): """simple docstring""" if scheduler is None: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(**_A ) __lowerCAmelCase = scheduler_class(**_A ) __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(**_A ) __lowerCAmelCase = scheduler_class(**_A ) __lowerCAmelCase = 1_0 __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = 
self.dummy_sample_deter scheduler.set_timesteps(_A ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase = model(_A , _A ) __lowerCAmelCase = scheduler.step(_A , _A , _A ).prev_sample return sample def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop("num_inference_steps" , _A ) for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**_A ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(_A , "set_timesteps" ): scheduler.set_timesteps(_A ) elif num_inference_steps is not None and not hasattr(_A , "set_timesteps" ): __lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] __lowerCAmelCase = scheduler.timesteps[5] __lowerCAmelCase = scheduler.timesteps[6] __lowerCAmelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample __lowerCAmelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase = self.full_loop(scheduler=_A ) __lowerCAmelCase = torch.mean(torch.abs(_A ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3 __lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = self.full_loop(scheduler=_A ) __lowerCAmelCase = torch.mean(torch.abs(_A ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.check_over_configs(thresholding=_A ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , algorithm_type="deis" , solver_order=_A , solver_type=_A , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , ) __lowerCAmelCase = self.full_loop( solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , ) assert not torch.isnan(_A ).any(), "Samples have nan numbers" def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.check_over_configs(lower_order_final=_A ) self.check_over_configs(lower_order_final=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: 
self.check_over_forward(num_inference_steps=_A , time_step=0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.full_loop() __lowerCAmelCase = torch.mean(torch.abs(_A ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.full_loop(prediction_type="v_prediction" ) __lowerCAmelCase = torch.mean(torch.abs(_A ) ) assert abs(result_mean.item() - 0.0_91 ) < 1E-3 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(thresholding=_A , dynamic_thresholding_ratio=0 ) __lowerCAmelCase = scheduler_class(**_A ) __lowerCAmelCase = 1_0 __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(_A ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase = model(_A , _A ) __lowerCAmelCase = scheduler.step(_A , _A , _A ).prev_sample assert sample.dtype == torch.floataa
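# Typical use of this scheduler outside the test-suite: swap it into an
# existing pipeline via from_config (the checkpoint name is an assumption):
from diffusers import DEISMultistepScheduler, DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]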
92
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : Optional[Any] = DebertaVaTokenizer _a : Optional[Any] = DebertaVaTokenizerFast _a : List[str] = True _a : Optional[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "this is a test" __lowerCAmelCase = "this is a test" return input_text, output_text def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(_A ) , 3_0_0_0_1 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "This is a test" __lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] __lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) # fmt: off __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] __lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DebertaVaTokenizer(_A ) __lowerCAmelCase = tokenizer.encode("sequence builders" ) __lowerCAmelCase = tokenizer.encode("multi-sequence build" ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 
2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
92
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCamelCase__ = logging.get_logger(__name__) @dataclass class a__ ( snake_case__ ): _a : List[str] = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **_A ): """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __lowerCAmelCase = deprecated_arg[3:] __lowerCAmelCase = not kwargs.pop(_A ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) __lowerCAmelCase = kwargs.pop("tpu_name" , self.tpu_name ) __lowerCAmelCase = kwargs.pop("device_idx" , self.device_idx ) __lowerCAmelCase = kwargs.pop("eager_mode" , self.eager_mode ) __lowerCAmelCase = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**_A ) _a : str = field( default=snake_case__ , metadata={"""help""": """Name of TPU"""} , ) _a : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) _a : bool = field(default=snake_case__ , metadata={"""help""": """Benchmark models in eager model."""} ) _a : bool = field( default=snake_case__ , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) __lowerCAmelCase = None if self.tpu: try: if self.tpu_name: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __lowerCAmelCase = None return tpu @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.n_gpu > 0
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Sort the items by the supplied key (e.g. value, weight or value/weight
    # ratio), then take items greedily while they still fit within max_cost.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
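# Illustration (not part of the original file): a small menu and one greedy
# pass under a 60-unit weight budget, using `build_menu`, `greedy` and
# `Things.value_weight` from above. The food data is chosen for the example.
names = ["Burger", "Pizza", "Coca Cola", "Rice", "Sambhar", "Chicken", "Fries", "Milk"]
values = [80, 100, 60, 70, 50, 110, 90, 60]
weights = [40, 60, 40, 70, 100, 85, 55, 70]

food = build_menu(names, values, weights)
taken, total_value = greedy(food, 60, Things.value_weight)
print(taken, total_value)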
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") UpperCamelCase__ = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : int = CamembertTokenizer _a : Dict = CamembertTokenizerFast _a : Tuple = True _a : List[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = CamembertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>NOTUSED" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(_A ) , 1_0_0_4 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = CamembertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.test_rust_tokenizer: return __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.tokenize(_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. __lowerCAmelCase = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=_A , )
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
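# Illustration (not part of the original file): these aliases are parameterised
# generics, so a signature can accept one path or an arbitrarily nested
# structure of paths. The function name here is hypothetical.
def load_all(path_or_paths: NestedDataStructureLike[PathLike]) -> None:
    ...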
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ): if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ): return x return (x, x) @require_tf class a__ : def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model} __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = 
after_output[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = np.abs((a - b) ).max() self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_save_load(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_pretrained_model_and_inputs() __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = after_outputs[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def 
__SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFViTModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFViTModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFDeiTModel(_A , name="vision_model" ) __lowerCAmelCase = TFRobertaModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFDeiTModelTester(self ) __lowerCAmelCase = TFRobertaModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 
vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = clip_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class a__ ( unittest.TestCase ): @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_A ) __lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __lowerCAmelCase = processor( text=["una foto di un gatto", "una foto di un cane"] , images=_A , padding=_A , return_tensors="np" ) __lowerCAmelCase = model(**_A ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowerCAmelCase = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
UpperCamelCase__ = """Alexander Joslin""" import operator as op from .stack import Stack def _a ( SCREAMING_SNAKE_CASE_ : str ): __lowerCAmelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub} __lowerCAmelCase = Stack() __lowerCAmelCase = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) ) elif i in operators: # RULE 2 operator_stack.push(SCREAMING_SNAKE_CASE_ ) elif i == ")": # RULE 4 __lowerCAmelCase = operator_stack.peek() operator_stack.pop() __lowerCAmelCase = operand_stack.peek() operand_stack.pop() __lowerCAmelCase = operand_stack.peek() operand_stack.pop() __lowerCAmelCase = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) operand_stack.push(SCREAMING_SNAKE_CASE_ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": UpperCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
import json import os import torch from diffusers import UNetaDModel os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): if hor == 1_28: __lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") __lowerCAmelCase = (32, 1_28, 2_56) __lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D") elif hor == 32: __lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") __lowerCAmelCase = (32, 64, 1_28, 2_56) __lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") __lowerCAmelCase = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" ) __lowerCAmelCase = model.state_dict() __lowerCAmelCase = { "down_block_types": down_block_types, "block_out_channels": block_out_channels, "up_block_types": up_block_types, "layers_per_block": 1, "use_timestep_embedding": True, "out_block_type": "OutConv1DBlock", "norm_num_groups": 8, "downsample_each_block": False, "in_channels": 14, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "flip_sin_to_cos": False, "freq_shift": 1, "sample_size": 6_55_36, "mid_block_type": "MidResTemporalBlock1D", "act_fn": "mish", } __lowerCAmelCase = UNetaDModel(**SCREAMING_SNAKE_CASE_ ) print(F"""length of state dict: {len(state_dict.keys() )}""" ) print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) __lowerCAmelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ ) torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" ) with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = { "in_channels": 14, "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), "up_block_types": (), "out_block_type": "ValueFunction", "mid_block_type": "ValueFunctionMidBlock1D", "block_out_channels": (32, 64, 1_28, 2_56), "layers_per_block": 1, "downsample_each_block": True, "sample_size": 6_55_36, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "use_timestep_embedding": True, "flip_sin_to_cos": False, "freq_shift": 1, "norm_num_groups": 8, "act_fn": "mish", } __lowerCAmelCase = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" ) __lowerCAmelCase = model __lowerCAmelCase = UNetaDModel(**SCREAMING_SNAKE_CASE_ ) print(F"""length of state dict: {len(state_dict.keys() )}""" ) print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) __lowerCAmelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ ) torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" ) with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == 
"__main__": unet(32) # unet(128) value_function()
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
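# Illustration (not part of the original file): a minimal sketch of using the
# configuration class, assuming a `transformers` version that ships MVP. The
# defaults above match the RUCAIBox/mvp architecture.
from transformers import MvpConfig, MvpModel

configuration = MvpConfig()
model = MvpModel(configuration)  # randomly initialised model with this config
print(configuration.d_model)     # 1024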
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str]=False ): __lowerCAmelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ): for i in range(config.num_hidden_layers ): if base_model: __lowerCAmelCase = "" else: __lowerCAmelCase = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) __lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[ : config.hidden_size, : ] __lowerCAmelCase = in_proj_bias[: config.hidden_size] __lowerCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCAmelCase = in_proj_weight[ -config.hidden_size :, : ] __lowerCAmelCase = in_proj_bias[-config.hidden_size :] def _a ( SCREAMING_SNAKE_CASE_ : str ): __lowerCAmelCase = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): __lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = val def _a ( ): __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int=True ): __lowerCAmelCase = ViTConfig() # patch_size if model_name[-1] == "8": __lowerCAmelCase = 8 # set labels if required if not base_model: __lowerCAmelCase = 10_00 __lowerCAmelCase = "huggingface/label-files" __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) __lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: __lowerCAmelCase = 3_84 __lowerCAmelCase = 15_36 __lowerCAmelCase = 12 __lowerCAmelCase = 6 # load original model from torch hub __lowerCAmelCase = torch.hub.load("facebookresearch/dino:main" , SCREAMING_SNAKE_CASE_ ) original_model.eval() # load state_dict of original model, remove and rename some keys __lowerCAmelCase = original_model.state_dict() if base_model: remove_classification_head_(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load HuggingFace model if base_model: __lowerCAmelCase = ViTModel(SCREAMING_SNAKE_CASE_ , add_pooling_layer=SCREAMING_SNAKE_CASE_ ).eval() else: __lowerCAmelCase = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by ViTImageProcessor __lowerCAmelCase = ViTImageProcessor() __lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" ) __lowerCAmelCase = encoding["pixel_values"] __lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ ) if base_model: __lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE_ ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: __lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE_ ) assert logits.shape == 
outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1E-3 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""dino_vitb16""", type=str, help="""Name of the model trained with DINO you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--base_model""", action="""store_true""", help="""Whether to only convert the base model (no projection head weights).""", ) parser.set_defaults(base_model=True) UpperCamelCase__ = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # move a random pivot to the end of the slice
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted


mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
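# Illustration (not part of the original file): the in-place routines above
# also work on a plain Python list; sortedness can be checked directly and the
# comparison count varies run to run because the pivot is random.
sample = [3, 1, 4, 1, 5, 9, 2, 6]
comparisons = _in_place_quick_sort(sample, 0, len(sample) - 1)
assert sample == sorted([3, 1, 4, 1, 5, 9, 2, 6])
print(f"sorted with {comparisons} comparisons")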
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : Optional[Any] = GPTaTokenizer _a : Dict = GPTaTokenizerFast _a : Tuple = True _a : List[Any] = {"""add_prefix_space""": True} _a : Optional[int] = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] __lowerCAmelCase = dict(zip(_A , range(len(_A ) ) ) ) __lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] __lowerCAmelCase = {"unk_token": "<unk>"} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_A ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_A ) ) def __SCREAMING_SNAKE_CASE( self , **_A ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **_A ) def __SCREAMING_SNAKE_CASE( self , **_A ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "lower newer" __lowerCAmelCase = "lower newer" return input_text, output_text def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowerCAmelCase = "lower newer" __lowerCAmelCase = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] __lowerCAmelCase = tokenizer.tokenize(_A , add_prefix_space=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.test_rust_tokenizer: return __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=_A ) __lowerCAmelCase = "lower newer" # Testing tokenization __lowerCAmelCase = tokenizer.tokenize(_A , add_prefix_space=_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) # Testing conversion to ids without special tokens __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A , add_prefix_space=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) # Testing conversion to ids with special tokens __lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=_A ) __lowerCAmelCase = tokenizer.encode(_A , add_prefix_space=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # Testing the unknown token __lowerCAmelCase = tokens + [rust_tokenizer.unk_token] __lowerCAmelCase = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_A ) , _A ) 
def __SCREAMING_SNAKE_CASE( self , *_A , **_A ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self , _A=1_5 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_A , **_A ) # Simple input __lowerCAmelCase = "This is a simple input" __lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"] __lowerCAmelCase = ("This is a simple input", "This is a pair") __lowerCAmelCase = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding="max_length" ) # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding="max_length" ) # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding="max_length" , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding="max_length" ) # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding="max_length" ) # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding="max_length" , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input __lowerCAmelCase = "This is a simple input" __lowerCAmelCase = ["This is a simple input looooooooong", "This is a simple input"] __lowerCAmelCase = ("This is a simple input", "This is a pair") __lowerCAmelCase = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] __lowerCAmelCase = tokenizer.pad_token_id __lowerCAmelCase = tokenizer(_A , padding="max_length" , max_length=3_0 , return_tensors="np" ) __lowerCAmelCase = tokenizer(_A , padding=_A , truncate=_A , return_tensors="np" ) __lowerCAmelCase = tokenizer(*_A , padding="max_length" , max_length=6_0 , return_tensors="np" ) __lowerCAmelCase = tokenizer(_A , padding=_A , truncate=_A , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "$$$" __lowerCAmelCase = GPTaTokenizer.from_pretrained(self.tmpdirname , 
bos_token=_A , add_bos_token=_A ) __lowerCAmelCase = "This is a simple input" __lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"] __lowerCAmelCase = tokenizer.bos_token_id __lowerCAmelCase = tokenizer(_A ) __lowerCAmelCase = tokenizer(_A ) self.assertEqual(out_s.input_ids[0] , _A ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __lowerCAmelCase = tokenizer.decode(out_s.input_ids ) __lowerCAmelCase = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _A ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = [self.get_tokenizer(do_lower_case=_A , add_bos_token=_A )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowerCAmelCase = "Encode this." __lowerCAmelCase = "This one too please." __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) encoded_sequence += tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = tokenizer.encode_plus( _A , _A , add_special_tokens=_A , return_special_tokens_mask=_A , ) __lowerCAmelCase = encoded_sequence_dict["input_ids"] __lowerCAmelCase = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(_A ) , len(_A ) ) __lowerCAmelCase = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(_A ) ] __lowerCAmelCase = [x for x in filtered_sequence if x is not None] self.assertEqual(_A , _A ) @require_tokenizers class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=_A ) __lowerCAmelCase = "A photo of a cat" __lowerCAmelCase = tokenizer.encode( _A , ) self.assertEqual(_A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("test_opt" ) __lowerCAmelCase = AutoTokenizer.from_pretrained("./test_opt" ) __lowerCAmelCase = tokenizer.encode( _A , ) self.assertEqual(_A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=_A ) __lowerCAmelCase = "A photo of a cat" __lowerCAmelCase = tokenizer.encode( _A , ) # Same as above self.assertEqual(_A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=_A ) __lowerCAmelCase = "bos" __lowerCAmelCase = tokenizer.get_vocab()["bos"] __lowerCAmelCase = "A photo of a cat" __lowerCAmelCase = tokenizer.encode( _A , ) # We changed the bos token self.assertEqual(_A , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("./tok" ) __lowerCAmelCase = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) __lowerCAmelCase = tokenizer.encode( _A , ) self.assertEqual(_A , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """
    Choose a random pivot element from the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element of lst using a quickselect-style
    recursion around a random pivot.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
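# Illustration (not part of the original file): k is 1-indexed, so k=1 returns
# the minimum. The input should not contain duplicates, because elements equal
# to the pivot are dropped by both comprehensions above.
print(kth_number([2, 1, 3, 4, 5], 3))                              # 3
print(kth_number([25, 21, 98, 100, 76, 22, 43, 60, 89, 87], 4))    # 43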
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version of one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
92
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a__ ( snake_case__ , unittest.TestCase ): _a : Dict = KandinskyImgaImgPipeline _a : List[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] _a : str = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] _a : List[Any] = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _a : int = False @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim * 4 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 1_0_0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , ) __lowerCAmelCase = MultilingualCLIP(_A ) __lowerCAmelCase = text_encoder.eval() return text_encoder @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**_A ) return model @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } 
@property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1_0_0_0, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**_A ) __lowerCAmelCase = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A ) __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __lowerCAmelCase = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 1_0, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**_A ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowerCAmelCase = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = "A red cartoon frog, 4k" __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(_A ) __lowerCAmelCase = 
KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( _A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_A , _A )
92
1
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : Optional[Any] = DebertaVaTokenizer _a : Optional[Any] = DebertaVaTokenizerFast _a : List[str] = True _a : Optional[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "this is a test" __lowerCAmelCase = "this is a test" return input_text, output_text def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(_A ) , 3_0_0_0_1 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "This is a test" __lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] __lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) # fmt: off __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] __lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DebertaVaTokenizer(_A ) __lowerCAmelCase = tokenizer.encode("sequence builders" ) __lowerCAmelCase = tokenizer.encode("multi-sequence build" ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 
2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
92
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 is served first), FIFO within each level."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # Serve the highest-priority (lowest index) non-empty queue first.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The element itself is its priority: the smallest element is dequeued first."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
92
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a__ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = scope __lowerCAmelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = num_patches + 2 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTModel(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTForMaskedImageModeling(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, 
self.image_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForMaskedImageModeling(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = self.type_sequence_label_size __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a__ ( snake_case__ , snake_case__ , unittest.TestCase ): _a : Optional[Any] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _a : int = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _a : Optional[Any] = False _a : Tuple = False _a : Tuple = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def __SCREAMING_SNAKE_CASE( 
self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ): """simple docstring""" __lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.model_tester.is_training: return __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_A ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __lowerCAmelCase = False __lowerCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue __lowerCAmelCase = model_class(_A ) model.gradient_checkpointing_enable() model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_A ), *get_values(_A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): __lowerCAmelCase = problem_type["title"] __lowerCAmelCase = problem_type["num_labels"] __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) if problem_type["num_labels"] > 1: __lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) __lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_A ) as warning_list: __lowerCAmelCase = model(**_A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = DeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _a ( ): __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( _A ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ).to(_A ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**_A ) # verify the logits __lowerCAmelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) __lowerCAmelCase = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ) __lowerCAmelCase = inputs.pixel_values.to(_A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __lowerCAmelCase = model(_A )
92
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
def solution(limit: int = 1_000_000) -> int:
    # Sieve for Euler's totient: after the loop, phi[i] holds phi(i) for i >= 2.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust every multiple of i
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    # Sum of phi(d) for d = 2..limit counts the reduced proper fractions n/d.
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
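# Quick sanity check (illustrative addition, not part of the original solution):
# for a small limit, the totient sieve above should agree with a brute-force
# count of fractions n/d in lowest terms.
if __name__ == "__main__":
    from math import gcd

    small_limit = 10
    brute_force = sum(
        1
        for denominator in range(2, small_limit + 1)
        for numerator in range(1, denominator)
        if gcd(numerator, denominator) == 1
    )
    assert solution(small_limit) == brute_force  # both equal 31 for limit = 10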
92
1
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 and 3x3 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (adjugate / adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1 / determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
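# Illustrative check (added for clarity; it reuses numpy, which is already a
# dependency above): multiplying a matrix by the inverse computed here should
# give the identity, up to floating-point error.
if __name__ == "__main__":
    import numpy as np

    m = [[2.0, 5.0], [1.0, 3.0]]  # determinant 1, inverse [[3, -5], [-1, 2]]
    inv = inverse_of_matrix(m)
    assert np.allclose(np.array(m) @ np.array(inv), np.eye(2))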
92
import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( """The `image_to_image.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionImg2ImgPipeline` instead.""" )
92
1
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class a__ : def __init__( self , _A , _A=9_9 , _A=1_3 , _A=1_6 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=3_2 , _A=4 , _A=4 , _A=3_0 , _A=0 , _A=1 , _A=2 , _A=None , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = decoder_seq_length # For common tests __lowerCAmelCase = self.decoder_seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_attention_mask __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = d_model __lowerCAmelCase = d_model __lowerCAmelCase = decoder_layers __lowerCAmelCase = decoder_layers __lowerCAmelCase = decoder_ffn_dim __lowerCAmelCase = decoder_attention_heads __lowerCAmelCase = decoder_attention_heads __lowerCAmelCase = eos_token_id __lowerCAmelCase = bos_token_id __lowerCAmelCase = pad_token_id __lowerCAmelCase = decoder_start_token_id __lowerCAmelCase = use_cache __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = None __lowerCAmelCase = decoder_seq_length __lowerCAmelCase = 2 __lowerCAmelCase = 1 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_attention_mask: __lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __lowerCAmelCase = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , ): """simple docstring""" __lowerCAmelCase = True __lowerCAmelCase = TrOCRDecoder(config=_A ).to(_A ).eval() __lowerCAmelCase = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass __lowerCAmelCase = model(_A , use_cache=_A ) __lowerCAmelCase = model(_A ) __lowerCAmelCase = model(_A , use_cache=_A ) self.parent.assertTrue(len(_A ) == len(_A ) ) self.parent.assertTrue(len(_A ) == len(_A ) + 1 ) __lowerCAmelCase = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids __lowerCAmelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and __lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCAmelCase = model(_A )["last_hidden_state"] __lowerCAmelCase = model(_A , past_key_values=_A )["last_hidden_state"] # select random slice __lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCAmelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, 
random_slice_idx].detach() __lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_A , _A , atol=1E-3 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): _a : List[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () _a : Dict = (TrOCRForCausalLM,) if is_torch_available() else () _a : Union[str, Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} _a : List[Any] = True _a : Dict = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TrOCRStandaloneDecoderModelTester(self , is_training=_A ) __lowerCAmelCase = ConfigTester(self , config_class=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return @unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass
92
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType UpperCamelCase__ = get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if accelerator.process_index == 0: logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {ckpt_dir}""" ) __lowerCAmelCase = {"model": state_dict} dist_cp.save_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Model saved to {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = 
torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) if F"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading model from {ckpt_dir}""" ) __lowerCAmelCase = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , ) __lowerCAmelCase = state_dict["model"] logger.info(F"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state saved in {output_optimizer_file}""" ) else: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Optimizer state saved in {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = 
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" ) else: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if F"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading Optimizer from {ckpt_dir}""" ) __lowerCAmelCase = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , ) __lowerCAmelCase = optim_state["optimizer"] logger.info(F"""Optimizer loaded from {ckpt_dir}""" ) __lowerCAmelCase = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
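# Illustrative call sequence (a sketch, assuming the four helpers above correspond
# to accelerate's save_fsdp_model / save_fsdp_optimizer / load_fsdp_model /
# load_fsdp_optimizer; `accelerator`, `model`, `optimizer` and `ckpt_dir` are
# placeholders for the caller's objects):
#
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)
#
# The branch taken inside each helper is driven by fsdp_plugin.state_dict_type
# (full, local, or sharded state dicts).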
92
1
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
92
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class a__ ( snake_case__ ): def __init__( self , *_A , _A=None , _A=None , **_A ): """simple docstring""" super().__init__(*_A , **_A ) __lowerCAmelCase = eval_examples __lowerCAmelCase = post_process_function def __SCREAMING_SNAKE_CASE( self , _A = None , _A=None , _A = None , _A = "eval" , **_A , ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) __lowerCAmelCase = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) __lowerCAmelCase = gen_kwargs __lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset __lowerCAmelCase = self.get_eval_dataloader(_A ) __lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __lowerCAmelCase = self.post_process_function(_A , _A , _A ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) else: __lowerCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_A ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A ) return metrics def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A = "test" , **_A ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = self.get_test_dataloader(_A ) # Temporarily disable metric computation, we will do it in the loop here. 
__lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output __lowerCAmelCase = self.post_process_function(_A , _A , _A , "predict" ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
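# Usage sketch (an assumption, not from this file): upstream this subclass is the
# question-answering Seq2SeqTrainer from the transformers examples. It takes the
# raw eval examples plus a post-processing hook on top of the usual trainer
# arguments. All variable names below are hypothetical placeholders.
#
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,                     # raw, pre-tokenization examples
#     post_process_function=post_processing_function,  # maps model output back to answer text
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4)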
92
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
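# With the lazy module wired up, end users import the names as if they were
# eager; they resolve on first access (a usage sketch, checkpoint name
# illustrative):
#
# from transformers import ConvBertModel, ConvBertTokenizer
# tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
# model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")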
92
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] ): __lowerCAmelCase = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() ) __lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] ) return params UpperCamelCase__ = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ): if metric == "rouge2": __lowerCAmelCase = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": __lowerCAmelCase = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": __lowerCAmelCase = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" " function." ) __lowerCAmelCase = ModelCheckpoint( dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): return EarlyStopping( monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , ) class a__ ( pl.Callback ): def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A=True ): """simple docstring""" logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) __lowerCAmelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results __lowerCAmelCase = Path(pl_module.hparams.output_dir ) if type_path == "test": __lowerCAmelCase = od / "test_results.txt" __lowerCAmelCase = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__lowerCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" __lowerCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , "a+" ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __lowerCAmelCase = metrics[key] if isinstance(_A , torch.Tensor ): __lowerCAmelCase = val.item() __lowerCAmelCase = f"""{key}: {val:.6f}\n""" writer.write(_A ) if not save_generations: return if "preds" in metrics: __lowerCAmelCase = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(_A ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" try: __lowerCAmelCase = pl_module.model.model.num_parameters() except AttributeError: __lowerCAmelCase = pl_module.model.num_parameters() __lowerCAmelCase = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , "test" ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
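# Wiring sketch (assumed usage, not from this file; upstream the factory
# functions above are named count_trainable_parameters, get_checkpoint_callback
# and get_early_stopping_callback, the callback class is Seq2SeqLoggingCallback,
# and `args` is a hypothetical namespace):
#
# checkpoint_cb = get_checkpoint_callback(args.output_dir, metric="rouge2")
# early_stopping_cb = get_early_stopping_callback(metric="rouge2", patience=3)
# trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stopping_cb])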
92
1
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ): if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ): return x return (x, x) @require_tf class a__ : def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model} __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = 
after_output[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = np.abs((a - b) ).max() self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_save_load(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_pretrained_model_and_inputs() __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = after_outputs[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def 
__SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFViTModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFViTModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFDeiTModel(_A , name="vision_model" ) __lowerCAmelCase = TFRobertaModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFDeiTModelTester(self ) __lowerCAmelCase = TFRobertaModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 
vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = clip_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class a__ ( unittest.TestCase ): @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_A ) __lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __lowerCAmelCase = processor( text=["una foto di un gatto", "una foto di un cane"] , images=_A , padding=_A , return_tensors="np" ) __lowerCAmelCase = model(**_A ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowerCAmelCase = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
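# Minimal usage sketch for the API exercised by these tests (assumed; the
# checkpoint names are illustrative): any pretrained vision and text encoder
# can be paired into a dual encoder whose projection heads start untrained.
#
# model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
#     "google/vit-base-patch16-224", "bert-base-uncased"
# )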
92
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
92
1
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
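# Worked example (an added demo): for "a+b*c" the conversion yields
# postfix "abc*+" and hence prefix "+a*bc".
if __name__ == "__main__":
    assert infix_2_prefix("a+b*c") == "+a*bc"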
92
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
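# Quick check (an added demo, using graph_fwd/graph_bwd as defined above): the
# shortest E -> F route in the sample graphs is E -> G -> F with total weight 3.
if __name__ == "__main__":
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3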
92
1
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet. The `forward()` API is designed to be compatible
    with `ControlNetModel`; the residuals produced by each ControlNet are summed.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
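# Usage sketch (an assumption, not from this file; the checkpoint names are
# illustrative): wrap several ControlNets so their residuals are summed during
# the denoising forward pass.
#
# from diffusers import ControlNetModel
# controlnets = [
#     ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#     ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
# ]
# multi = MultiControlNetModel(controlnets)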
92
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
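# Instantiation sketch (an assumption, not from this file): state_dim and
# act_dim describe the environment's observation and action sizes, e.g. Gym
# Hopper uses 11-dimensional observations and 3-dimensional actions.
#
# from transformers import DecisionTransformerConfig
# config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=128)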
92
1
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ = 16 UpperCamelCase__ = 32 def _a ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 16 ): __lowerCAmelCase = AutoTokenizer.from_pretrained("bert-base-cased" ) __lowerCAmelCase = load_dataset("glue" , "mrpc" ) def tokenize_function(SCREAMING_SNAKE_CASE_ : List[str] ): # max_length=None => use the model max length (it's actually the default) __lowerCAmelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCAmelCase = datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCAmelCase = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(SCREAMING_SNAKE_CASE_ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCAmelCase = 16 elif accelerator.mixed_precision != "no": __lowerCAmelCase = 8 else: __lowerCAmelCase = None return tokenizer.pad( SCREAMING_SNAKE_CASE_ , padding="longest" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__lowerCAmelCase = DataLoader( tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = DataLoader( tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase__ = mocked_dataloaders # noqa: F811 def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , SCREAMING_SNAKE_CASE_ ) == "1": __lowerCAmelCase = 2 # New Code # __lowerCAmelCase = int(args.gradient_accumulation_steps ) # Initialize accelerator __lowerCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase = config["lr"] __lowerCAmelCase = int(config["num_epochs"] ) __lowerCAmelCase = int(config["seed"] ) __lowerCAmelCase = int(config["batch_size"] ) __lowerCAmelCase = evaluate.load("glue" , "mrpc" ) set_seed(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCAmelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCAmelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ ) # Instantiate scheduler __lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE_ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = output.loss accelerator.backward(SCREAMING_SNAKE_CASE_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = outputs.logits.argmax(dim=-1 ) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , ) __lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
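# The core pattern the script demonstrates, in isolation (a minimal sketch,
# assuming an already-prepared model/optimizer/dataloader): inside
# `accelerator.accumulate(model)`, gradient synchronization and the optimizer
# step are skipped until `gradient_accumulation_steps` batches have been seen.
#
# accelerator = Accelerator(gradient_accumulation_steps=4)
# for batch in train_dataloader:
#     with accelerator.accumulate(model):
#         loss = model(**batch).loss
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()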
92
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): _a : str = StableUnCLIPPipeline _a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS _a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS _a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _a : Optional[Any] = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = 3_2 __lowerCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) __lowerCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , ) torch.manual_seed(0 ) __lowerCAmelCase = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) __lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A ) __lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , ) torch.manual_seed(0 ) __lowerCAmelCase = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , ) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL() __lowerCAmelCase = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": 
prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __lowerCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=_A ) @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" ) __lowerCAmelCase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCAmelCase = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) __lowerCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 1_0**9
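# Plain inference sketch for the pipeline under test (assumed usage, using the
# checkpoint referenced in the slow tests above):
#
# pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
# pipe = pipe.to("cuda")
# image = pipe("anime turtle", num_inference_steps=20).images[0]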
92
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _a ( ): __lowerCAmelCase = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png" __lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert("RGB" ) return image def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): __lowerCAmelCase = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") ) # fmt: on return rename_keys def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict ): __lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = val def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases __lowerCAmelCase = 
state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) __lowerCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict __lowerCAmelCase = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE_ , requires_grad=SCREAMING_SNAKE_CASE_ ), v_bias) ) __lowerCAmelCase = qkv_bias def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ): __lowerCAmelCase = 3_64 if "coco" in model_name else 2_24 __lowerCAmelCase = BlipaVisionConfig(image_size=SCREAMING_SNAKE_CASE_ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: __lowerCAmelCase = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=SCREAMING_SNAKE_CASE_ ).to_dict() elif "opt-6.7b" in model_name: __lowerCAmelCase = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=SCREAMING_SNAKE_CASE_ ).to_dict() elif "t5-xl" in model_name: __lowerCAmelCase = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: __lowerCAmelCase = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() __lowerCAmelCase = BlipaConfig(vision_config=SCREAMING_SNAKE_CASE_ , text_config=SCREAMING_SNAKE_CASE_ ) return config, image_size @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ): __lowerCAmelCase = ( AutoTokenizer.from_pretrained("facebook/opt-2.7b" ) if "opt" in model_name else AutoTokenizer.from_pretrained("google/flan-t5-xl" ) ) __lowerCAmelCase = tokenizer("\n" , add_special_tokens=SCREAMING_SNAKE_CASE_ ).input_ids[0] __lowerCAmelCase , __lowerCAmelCase = get_blipa_config(SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = BlipaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval() __lowerCAmelCase = { "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"), "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"), "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"), "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"), "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"), "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"), "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"), } __lowerCAmelCase , __lowerCAmelCase = model_name_to_original[model_name] # load original model print("Loading original model..." ) __lowerCAmelCase = "cuda" if torch.cuda.is_available() else "cpu" __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = load_model_and_preprocess( name=SCREAMING_SNAKE_CASE_ , model_type=SCREAMING_SNAKE_CASE_ , is_eval=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) original_model.eval() print("Done!" 
) # update state dict keys __lowerCAmelCase = original_model.state_dict() __lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): __lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) if key.startswith("Qformer.bert" ): __lowerCAmelCase = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: __lowerCAmelCase = key.replace("self" , "attention" ) if "opt_proj" in key: __lowerCAmelCase = key.replace("opt_proj" , "language_projection" ) if "t5_proj" in key: __lowerCAmelCase = key.replace("t5_proj" , "language_projection" ) if key.startswith("opt" ): __lowerCAmelCase = key.replace("opt" , "language" ) if key.startswith("t5" ): __lowerCAmelCase = key.replace("t5" , "language" ) __lowerCAmelCase = val # read in qv biases read_in_q_v_bias(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase = hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert len(SCREAMING_SNAKE_CASE_ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] __lowerCAmelCase = load_demo_image() __lowerCAmelCase = vis_processors["eval"](SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) # create processor __lowerCAmelCase = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = BlipaProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values.to(SCREAMING_SNAKE_CASE_ ) # make sure processor creates exact same pixel values assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) original_model.to(SCREAMING_SNAKE_CASE_ ) hf_model.to(SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): if "opt" in model_name: __lowerCAmelCase = original_model({"image": original_pixel_values, "text_input": [""]} ).logits __lowerCAmelCase = hf_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).logits else: __lowerCAmelCase = original_model( {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits __lowerCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) __lowerCAmelCase = hf_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ).logits assert original_logits.shape == logits.shape print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": __lowerCAmelCase = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=SCREAMING_SNAKE_CASE_ ) assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": __lowerCAmelCase = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=SCREAMING_SNAKE_CASE_ ) else: # cast to same type __lowerCAmelCase = logits.dtype assert torch.allclose(original_logits.to(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , atol=1E-2 ) print("Looks ok!" ) print("Generating a caption..." 
) __lowerCAmelCase = "" __lowerCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = original_model.generate({"image": original_pixel_values} ) __lowerCAmelCase = hf_model.generate( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("Original generation:" , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = input_ids.shape[1] __lowerCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = [text.strip() for text in output_text] print("HF generation:" , SCREAMING_SNAKE_CASE_ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() UpperCamelCase__ = [ """blip2-opt-2.7b""", """blip2-opt-6.7b""", """blip2-opt-2.7b-coco""", """blip2-opt-6.7b-coco""", """blip2-flan-t5-xl""", """blip2-flan-t5-xl-coco""", """blip2-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""blip2-opt-2.7b""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase__ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
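# Example invocation (an assumption: the script file name is hypothetical; the
# flags come from the argparse definition above):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub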
from typing import TYPE_CHECKING

from ...utils import _LazyModule


UpperCamelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], UpperCamelCase__, module_spec=__spec__)
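# A minimal sketch of the lazy-import pattern that _LazyModule provides for the
# package above: module attributes trigger the real import on first access via
# a module-level __getattr__ (PEP 562). Illustrative only, not the transformers
# internals.
import importlib

_lazy_imports = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}


def __getattr__(name):
    for submodule, exported in _lazy_imports.items():
        if name in exported:
            module = importlib.import_module("." + submodule, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")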
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False, False, False @dataclass class a__ : _a : Optional[int] = None _a : bool = True _a : bool = True _a : Optional[str] = None # Automatically constructed _a : ClassVar[str] = "dict" _a : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) _a : str = field(default="""Audio""" , init=snake_case__ , repr=snake_case__ ) def __call__( self ): """simple docstring""" return self.pa_type def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(_A , _A ): return {"bytes": None, "path": value} elif isinstance(_A , _A ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __lowerCAmelCase = BytesIO() sf.write(_A , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __lowerCAmelCase = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7 else: __lowerCAmelCase = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_2_7_6_7 __lowerCAmelCase = BytesIO(bytes() ) sf.write(_A , _A , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) __lowerCAmelCase , __lowerCAmelCase = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err __lowerCAmelCase = xsplitext(_A )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: __lowerCAmelCase = token_per_repo_id or {} __lowerCAmelCase = path.split("::" )[-1] try: __lowerCAmelCase = string_to_dict(_A , config.HUB_DATASETS_URL )["repo_id"] __lowerCAmelCase = token_per_repo_id[repo_id] except (ValueError, KeyError): __lowerCAmelCase = None with xopen(_A , "rb" , use_auth_token=_A ) as f: __lowerCAmelCase , __lowerCAmelCase = sf.read(_A ) else: __lowerCAmelCase , __lowerCAmelCase = sf.read(_A ) __lowerCAmelCase = array.T if self.mono: __lowerCAmelCase = librosa.to_mono(_A ) if self.sampling_rate and self.sampling_rate != sampling_rate: __lowerCAmelCase = librosa.resample(_A , orig_sr=_A , target_sr=self.sampling_rate ) __lowerCAmelCase = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" if pa.types.is_string(storage.type ): __lowerCAmelCase = pa.array([None] * len(_A ) , type=pa.binary() ) __lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __lowerCAmelCase = pa.array([None] * len(_A ) , type=pa.string() ) __lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): __lowerCAmelCase = pa.array([Audio().encode_example(_A ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: __lowerCAmelCase = storage.field("bytes" ) else: __lowerCAmelCase = pa.array([None] * len(_A ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: __lowerCAmelCase = storage.field("path" ) else: __lowerCAmelCase = pa.array([None] * len(_A ) , type=pa.string() ) __lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(_A , self.pa_type ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" @no_op_if_value_is_null def path_to_bytes(_A ): with xopen(_A , "rb" ) as f: __lowerCAmelCase = f.read() return bytes_ __lowerCAmelCase = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) __lowerCAmelCase = pa.array( [os.path.basename(_A ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) __lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(_A , self.pa_type )
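# Hedged usage sketch for the Audio feature above through the public datasets
# API; cast_column and Audio are real, the file path is hypothetical.
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000, mono=True))
sample = ds[0]["audio"]  # decode_example runs lazily on access
print(sample["array"].shape, sample["sampling_rate"])  # mono array, resampled to 16 kHz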
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : Optional[Any] = DebertaVaTokenizer _a : Optional[Any] = DebertaVaTokenizerFast _a : List[str] = True _a : Optional[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "this is a test" __lowerCAmelCase = "this is a test" return input_text, output_text def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(_A ) , 3_0_0_0_1 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "This is a test" __lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] __lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) # fmt: off __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] __lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DebertaVaTokenizer(_A ) __lowerCAmelCase = tokenizer.encode("sequence builders" ) __lowerCAmelCase = tokenizer.encode("multi-sequence build" ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 
2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
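# A minimal slow-vs-fast parity check in the spirit of the tests above, using
# the public checkpoint from the integration test. Note the suite skips some
# known slow/fast divergences for certain lowercasing options, so this sketch
# only exercises the default settings.
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast

slow = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
fast = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")
text = "I was born in 92000, and this is falsé."
assert slow.encode(text) == fast.encode(text)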
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process UpperCamelCase__ = logging.getLogger(__name__) UpperCamelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) UpperCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a__ : _a : Optional[str] = field( default=snake_case__ , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(snake_case__ )} , ) _a : Optional[str] = field( default=snake_case__ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) _a : bool = field( default=snake_case__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) _a : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _a : bool = field( default=snake_case__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class a__ : _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) _a : Optional[str] = field(default=snake_case__ , metadata={"""help""": """The input training data file (a text file)."""} ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , ) _a : bool = field( default=snake_case__ , 
metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) _a : Optional[int] = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) _a : Optional[int] = field( default=snake_case__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) _a : Optional[int] = field( default=snake_case__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) _a : float = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) _a : bool = field( default=snake_case__ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if self.train_file is not None: __lowerCAmelCase = self.train_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: __lowerCAmelCase = self.validation_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ): with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" ) as f: __lowerCAmelCase = [json.loads(SCREAMING_SNAKE_CASE_ ) for line in f.read().splitlines() if (len(SCREAMING_SNAKE_CASE_ ) > 0 and not line.isspace())] assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = {c: dataset[c] for c in dataset.column_names} __lowerCAmelCase = refs return Dataset.from_dict(SCREAMING_SNAKE_CASE_ ) def _a ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE_ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): __lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , ) __lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , ) else: __lowerCAmelCase = {} if data_args.train_file is not None: __lowerCAmelCase = data_args.train_file if data_args.validation_file is not None: __lowerCAmelCase = data_args.validation_file __lowerCAmelCase = data_args.train_file.split("." )[-1] if extension == "txt": __lowerCAmelCase = "text" __lowerCAmelCase = load_dataset(SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCAmelCase = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: __lowerCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE_ ) elif model_args.model_name_or_path: __lowerCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ ) else: __lowerCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) __lowerCAmelCase = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: __lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **SCREAMING_SNAKE_CASE_ ) elif model_args.model_name_or_path: __lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: __lowerCAmelCase = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) __lowerCAmelCase = AutoModelForMaskedLM.from_config(SCREAMING_SNAKE_CASE_ ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: __lowerCAmelCase = datasets["train"].column_names else: __lowerCAmelCase = datasets["validation"].column_names __lowerCAmelCase = "text" if "text" in column_names else column_names[0] __lowerCAmelCase = "max_length" if data_args.pad_to_max_length else False def tokenize_function(SCREAMING_SNAKE_CASE_ : Union[str, Any] ): # Remove empty lines __lowerCAmelCase = [line for line in examples["text"] if len(SCREAMING_SNAKE_CASE_ ) > 0 and not line.isspace()] return tokenizer(examples["text"] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=data_args.max_seq_length ) __lowerCAmelCase = datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: __lowerCAmelCase = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: __lowerCAmelCase = add_chinese_references( tokenized_datasets["validation"] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer __lowerCAmelCase = data_args.train_ref_file or data_args.validation_ref_file if has_ref: __lowerCAmelCase = False # Data collator # This one will take care of randomly masking the tokens. 
__lowerCAmelCase = DataCollatorForWholeWordMask(tokenizer=SCREAMING_SNAKE_CASE_ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __lowerCAmelCase = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: if last_checkpoint is not None: __lowerCAmelCase = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): __lowerCAmelCase = model_args.model_name_or_path else: __lowerCAmelCase = None __lowerCAmelCase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ ) trainer.save_model() # Saves the tokenizer too for easy upload __lowerCAmelCase = os.path.join(training_args.output_dir , "train_results.txt" ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer: logger.info("***** Train results *****" ) for key, value in sorted(train_result.metrics.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # Evaluation __lowerCAmelCase = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) __lowerCAmelCase = trainer.evaluate() __lowerCAmelCase = math.exp(eval_output["eval_loss"] ) __lowerCAmelCase = perplexity __lowerCAmelCase = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in sorted(results.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) return results def _a ( SCREAMING_SNAKE_CASE_ : Any ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
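# The evaluation block above reports perplexity as exp(mean cross-entropy
# loss); a minimal numeric illustration with a made-up loss value:
import math

eval_loss = 2.30
perplexity = math.exp(eval_loss)  # ~9.97: roughly a 10-way uniform choice per token
print(f"perplexity = {perplexity:.2f}")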
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCamelCase__ = logging.get_logger(__name__) @dataclass class a__ ( snake_case__ ): _a : List[str] = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **_A ): """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __lowerCAmelCase = deprecated_arg[3:] __lowerCAmelCase = not kwargs.pop(_A ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) __lowerCAmelCase = kwargs.pop("tpu_name" , self.tpu_name ) __lowerCAmelCase = kwargs.pop("device_idx" , self.device_idx ) __lowerCAmelCase = kwargs.pop("eager_mode" , self.eager_mode ) __lowerCAmelCase = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**_A ) _a : str = field( default=snake_case__ , metadata={"""help""": """Name of TPU"""} , ) _a : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) _a : bool = field(default=snake_case__ , metadata={"""help""": """Benchmark models in eager model."""} ) _a : bool = field( default=snake_case__ , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) __lowerCAmelCase = None if self.tpu: try: if self.tpu_name: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __lowerCAmelCase = None return tpu @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.n_gpu > 0
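# Standalone sketch of the deprecated-argument translation done in __init__
# above: a "no_xxx" kwarg is popped and re-stored as the negated positive
# flag. Illustrative only, not the transformers class itself.
DEPRECATED_ARGS = ["no_inference", "no_cuda", "no_tpu"]


def translate_kwargs(**kwargs):
    for deprecated_arg in DEPRECATED_ARGS:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs


assert translate_kwargs(no_inference=True) == {"inference": False}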
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCamelCase__ = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ): return [ int(10_00 * (box[0] / width) ), int(10_00 * (box[1] / height) ), int(10_00 * (box[2] / width) ), int(10_00 * (box[3] / height) ), ] def _a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Optional[str] , SCREAMING_SNAKE_CASE_ : Optional[str] ): __lowerCAmelCase = to_pil_image(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase = pil_image.size __lowerCAmelCase = pytesseract.image_to_data(SCREAMING_SNAKE_CASE_ , lang=SCREAMING_SNAKE_CASE_ , output_type="dict" , config=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates __lowerCAmelCase = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE_ ) if not word.strip()] __lowerCAmelCase = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __lowerCAmelCase = [] for x, y, w, h in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = [x, y, x + w, y + h] actual_boxes.append(SCREAMING_SNAKE_CASE_ ) # finally, normalize the bounding boxes __lowerCAmelCase = [] for box in actual_boxes: normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class a__ ( snake_case__ ): _a : Dict = ["""pixel_values"""] def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = 1 / 2_5_5 , _A = True , _A = None , _A = None , _A = True , _A = None , _A = "" , **_A , ): """simple docstring""" super().__init__(**_A ) __lowerCAmelCase = size if size is not None else {"height": 2_2_4, "width": 2_2_4} __lowerCAmelCase = get_size_dict(_A ) __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = resample __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_value __lowerCAmelCase = do_normalize __lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowerCAmelCase = image_std if image_std 
is not None else IMAGENET_STANDARD_STD __lowerCAmelCase = apply_ocr __lowerCAmelCase = ocr_lang __lowerCAmelCase = tesseract_config def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ): """simple docstring""" __lowerCAmelCase = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) __lowerCAmelCase = (size["height"], size["width"]) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = None , **_A , ): """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A = None , **_A , ): """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = None , _A=None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ): """simple docstring""" __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = size if size is not None else self.size __lowerCAmelCase = get_size_dict(_A ) __lowerCAmelCase = resample if resample is not None else self.resample __lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCAmelCase = image_mean if image_mean is not None else self.image_mean __lowerCAmelCase = image_std if image_std is not None else self.image_std __lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr __lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang __lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config __lowerCAmelCase = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("If do_normalize is True, image_mean and image_std must be specified." ) # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(_A ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , "pytesseract" ) __lowerCAmelCase = [] __lowerCAmelCase = [] for image in images: __lowerCAmelCase , __lowerCAmelCase = apply_tesseract(_A , _A , _A ) words_batch.append(_A ) boxes_batch.append(_A ) if do_resize: __lowerCAmelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_rescale: __lowerCAmelCase = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __lowerCAmelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __lowerCAmelCase = [to_channel_dimension_format(_A , _A ) for image in images] __lowerCAmelCase = BatchFeature(data={"pixel_values": images} , tensor_type=_A ) if apply_ocr: __lowerCAmelCase = words_batch __lowerCAmelCase = boxes_batch return data
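# Worked example of the box normalization above: pixel coordinates are mapped
# onto a fixed 0-1000 grid so layout features are resolution independent.
def normalize_box_example(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


# a 100x50 pixel word box at (200, 300) on an 800x600 page:
print(normalize_box_example([200, 300, 300, 350], 800, 600))  # [250, 500, 375, 583]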
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") UpperCamelCase__ = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : int = CamembertTokenizer _a : Dict = CamembertTokenizerFast _a : Tuple = True _a : List[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = CamembertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>NOTUSED" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(_A ) , 1_0_0_4 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = CamembertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.test_rust_tokenizer: return __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.tokenize(_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. __lowerCAmelCase = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=_A , )
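# The round-trip test above compares slow-tokenizer ids converted back to
# tokens against the fast tokenizer's tokens, which sidesteps the <unk>
# string mismatch noted in the comment. A hedged standalone equivalent:
from transformers import CamembertTokenizer, CamembertTokenizerFast

slow = CamembertTokenizer.from_pretrained("camembert-base")
fast = CamembertTokenizerFast.from_pretrained("camembert-base")
text = "I was born in 92000, and this is falsé."
ids = slow.encode(text, add_special_tokens=False)
assert slow.convert_ids_to_tokens(ids) == fast.tokenize(text)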
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

UpperCamelCase__ = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class a__(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        return {}, {}, {}

    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_5_5 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
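# Hedged usage sketch for the pipeline above via the public pipeline factory;
# the task string "depth-estimation" is real, the checkpoint choice is an
# assumption.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")  # PIL image assembled in postprocess above
print(result["predicted_depth"].shape)  # raw depth tensor before rescaling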
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ): if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ): return x return (x, x) @require_tf class a__ : def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model} __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = 
after_output[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = np.abs((a - b) ).max() self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_save_load(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_pretrained_model_and_inputs() __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = after_outputs[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def 
__SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFViTModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFViTModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFDeiTModel(_A , name="vision_model" ) __lowerCAmelCase = TFRobertaModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFDeiTModelTester(self ) __lowerCAmelCase = TFRobertaModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 
vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = clip_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class a__ ( unittest.TestCase ): @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_A ) __lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __lowerCAmelCase = processor( text=["una foto di un gatto", "una foto di un cane"] , images=_A , padding=_A , return_tensors="np" ) __lowerCAmelCase = model(**_A ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowerCAmelCase = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
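# Added usage sketch, mirroring the integration test above (requires network
# access to the public "clip-italian/clip-italian" checkpoint):
#
# model = TFVisionTextDualEncoderModel.from_pretrained(
#     "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
# )
# processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# inputs = processor(
#     text=["una foto di un gatto", "una foto di un cane"],
#     images=image, padding=True, return_tensors="np",
# )
# outputs = model(**inputs)
# outputs.logits_per_image has shape (num_images, num_texts) and
# outputs.logits_per_text is its transpose, as the assertions above check.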
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class a__ ( snake_case__ ): def __init__( self , *_A , _A=None , _A=None , **_A ): """simple docstring""" super().__init__(*_A , **_A ) __lowerCAmelCase = eval_examples __lowerCAmelCase = post_process_function def __SCREAMING_SNAKE_CASE( self , _A = None , _A=None , _A = None , _A = "eval" , **_A , ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) __lowerCAmelCase = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) __lowerCAmelCase = gen_kwargs __lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset __lowerCAmelCase = self.get_eval_dataloader(_A ) __lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __lowerCAmelCase = self.post_process_function(_A , _A , _A ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) else: __lowerCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_A ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A ) return metrics def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A = "test" , **_A ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = self.get_test_dataloader(_A ) # Temporarily disable metric computation, we will do it in the loop here. 
__lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output __lowerCAmelCase = self.post_process_function(_A , _A , _A , "predict" ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
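# Added usage sketch (names are hypothetical, since the subclass above was left
# unnamed): it behaves like a stock Seq2SeqTrainer plus `eval_examples` and a
# `post_process_function` that turns raw generations into metric inputs, and
# evaluate()/predict() accept generation kwargs such as max_length/num_beams.
#
# trainer = QuestionAnsweringSeq2SeqTrainer(      # hypothetical subclass name
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")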
import json import os import torch from diffusers import UNetaDModel os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): if hor == 1_28: __lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") __lowerCAmelCase = (32, 1_28, 2_56) __lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D") elif hor == 32: __lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") __lowerCAmelCase = (32, 64, 1_28, 2_56) __lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") __lowerCAmelCase = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" ) __lowerCAmelCase = model.state_dict() __lowerCAmelCase = { "down_block_types": down_block_types, "block_out_channels": block_out_channels, "up_block_types": up_block_types, "layers_per_block": 1, "use_timestep_embedding": True, "out_block_type": "OutConv1DBlock", "norm_num_groups": 8, "downsample_each_block": False, "in_channels": 14, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "flip_sin_to_cos": False, "freq_shift": 1, "sample_size": 6_55_36, "mid_block_type": "MidResTemporalBlock1D", "act_fn": "mish", } __lowerCAmelCase = UNetaDModel(**SCREAMING_SNAKE_CASE_ ) print(F"""length of state dict: {len(state_dict.keys() )}""" ) print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) __lowerCAmelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ ) torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" ) with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = { "in_channels": 14, "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), "up_block_types": (), "out_block_type": "ValueFunction", "mid_block_type": "ValueFunctionMidBlock1D", "block_out_channels": (32, 64, 1_28, 2_56), "layers_per_block": 1, "downsample_each_block": True, "sample_size": 6_55_36, "out_channels": 14, "extra_in_channels": 0, "time_embedding_type": "positional", "use_timestep_embedding": True, "flip_sin_to_cos": False, "freq_shift": 1, "norm_num_groups": 8, "act_fn": "mish", } __lowerCAmelCase = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" ) __lowerCAmelCase = model __lowerCAmelCase = UNetaDModel(**SCREAMING_SNAKE_CASE_ ) print(F"""length of state dict: {len(state_dict.keys() )}""" ) print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" ) __lowerCAmelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ ) torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" ) with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == 
"__main__": unet(32) # unet(128) value_function()
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a__ ( snake_case__ ): _a : Tuple = ["""image_processor""", """tokenizer"""] _a : Union[str, Any] = """CLIPImageProcessor""" _a : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , _A=None , _A=None , **_A ): """simple docstring""" __lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _A , ) __lowerCAmelCase = kwargs.pop("feature_extractor" ) __lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_A , _A ) def __call__( self , _A=None , _A=None , _A=None , **_A ): """simple docstring""" if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: __lowerCAmelCase = self.tokenizer(_A , return_tensors=_A , **_A ) if images is not None: __lowerCAmelCase = self.image_processor(_A , return_tensors=_A , **_A ) if text is not None and images is not None: __lowerCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A ) , tensor_type=_A ) def __SCREAMING_SNAKE_CASE( self , *_A , **_A ): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A ) def __SCREAMING_SNAKE_CASE( self , *_A , **_A ): """simple docstring""" return self.tokenizer.decode(*_A , **_A ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.tokenizer.model_input_names __lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _A , ) return self.image_processor_class @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _A , ) return self.image_processor
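# Added usage sketch (assumes a standard CLIP checkpoint on the Hub; the class
# above is registered for CLIPImageProcessor plus the CLIP tokenizers, i.e. it
# appears to be CLIPProcessor):
#
# from PIL import Image
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")  # assumed public name
# enc = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
# enc now carries input_ids and attention_mask from the tokenizer plus
# pixel_values merged in from the image processor, exactly as __call__ above does.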
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics.
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    # The metrics API is deprecated in favor of the `evaluate` library; the
    # deprecation surfaces as a FutureWarning pointing at the evaluate docs.
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available UpperCamelCase__ = logging.getLogger(__name__) @dataclass class a__ : _a : str _a : List[str] _a : Optional[List[str]] @dataclass class a__ : _a : List[int] _a : List[int] _a : Optional[List[int]] = None _a : Optional[List[int]] = None class a__ ( snake_case__ ): _a : Optional[int] = """train""" _a : int = """dev""" _a : Tuple = """test""" class a__ : @staticmethod def __SCREAMING_SNAKE_CASE( _A , _A ): """simple docstring""" raise NotImplementedError @staticmethod def __SCREAMING_SNAKE_CASE( _A ): """simple docstring""" raise NotImplementedError @staticmethod def __SCREAMING_SNAKE_CASE( _A , _A , _A , _A , _A=False , _A="[CLS]" , _A=1 , _A="[SEP]" , _A=False , _A=False , _A=0 , _A=0 , _A=-1_0_0 , _A=0 , _A=True , ): """simple docstring""" __lowerCAmelCase = {label: i for i, label in enumerate(_A )} __lowerCAmelCase = [] for ex_index, example in enumerate(_A ): if ex_index % 1_0_0_0_0 == 0: logger.info("Writing example %d of %d" , _A , len(_A ) ) __lowerCAmelCase = [] __lowerCAmelCase = [] for word, label in zip(example.words , example.labels ): __lowerCAmelCase = tokenizer.tokenize(_A ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(_A ) > 0: tokens.extend(_A ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_A ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. __lowerCAmelCase = tokenizer.num_special_tokens_to_add() if len(_A ) > max_seq_length - special_tokens_count: __lowerCAmelCase = tokens[: (max_seq_length - special_tokens_count)] __lowerCAmelCase = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] __lowerCAmelCase = [sequence_a_segment_id] * len(_A ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: __lowerCAmelCase = [cls_token] + tokens __lowerCAmelCase = [pad_token_label_id] + label_ids __lowerCAmelCase = [cls_token_segment_id] + segment_ids __lowerCAmelCase = tokenizer.convert_tokens_to_ids(_A ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. 
__lowerCAmelCase = [1 if mask_padding_with_zero else 0] * len(_A ) # Zero-pad up to the sequence length. __lowerCAmelCase = max_seq_length - len(_A ) if pad_on_left: __lowerCAmelCase = ([pad_token] * padding_length) + input_ids __lowerCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask __lowerCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids __lowerCAmelCase = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length assert len(_A ) == max_seq_length if ex_index < 5: logger.info("*** Example ***" ) logger.info("guid: %s" , example.guid ) logger.info("tokens: %s" , " ".join([str(_A ) for x in tokens] ) ) logger.info("input_ids: %s" , " ".join([str(_A ) for x in input_ids] ) ) logger.info("input_mask: %s" , " ".join([str(_A ) for x in input_mask] ) ) logger.info("segment_ids: %s" , " ".join([str(_A ) for x in segment_ids] ) ) logger.info("label_ids: %s" , " ".join([str(_A ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: __lowerCAmelCase = None features.append( InputFeatures( input_ids=_A , attention_mask=_A , token_type_ids=_A , label_ids=_A ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class a__ ( snake_case__ ): _a : List[InputFeatures] _a : int = nn.CrossEntropyLoss().ignore_index def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ): """simple docstring""" __lowerCAmelCase = os.path.join( _A , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(_A ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__lowerCAmelCase = cached_features_file + ".lock" with FileLock(_A ): if os.path.exists(_A ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __lowerCAmelCase = torch.load(_A ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __lowerCAmelCase = token_classification_task.read_examples_from_file(_A , _A ) # TODO clean up all this to leverage built-in features of tokenizers __lowerCAmelCase = token_classification_task.convert_examples_to_features( _A , _A , _A , _A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(f"""Saving features into cached file {cached_features_file}""" ) torch.save(self.features , _A ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _A ): """simple docstring""" return self.features[i] if is_tf_available(): import tensorflow as tf class a__ : _a : List[InputFeatures] _a : int = -1_0_0 def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ): """simple docstring""" __lowerCAmelCase = token_classification_task.read_examples_from_file(_A , _A ) # TODO clean up all this to leverage built-in features of tokenizers __lowerCAmelCase = token_classification_task.convert_examples_to_features( _A , _A , _A , _A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: __lowerCAmelCase = tf.data.Dataset.from_generator( _A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , ( {"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: __lowerCAmelCase = tf.data.Dataset.from_generator( _A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , ( { "input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] ), "token_type_ids": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _A ): """simple docstring""" return self.features[i]
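# Added usage sketch (hypothetical paths and label set): both dataset classes
# above take the same constructor arguments, shown here for the torch variant:
#
# dataset = TokenClassificationDataset(            # assumed un-obfuscated name
#     token_classification_task=ner_task,
#     data_dir="path/to/data",                     # hypothetical
#     tokenizer=tokenizer,
#     labels=["O", "B-PER", "I-PER"],              # hypothetical label set
#     model_type="bert",
#     max_seq_length=128,
#     mode=Split.dev,
# )
# dataset[0] is an InputFeatures whose input_ids / attention_mask / label_ids
# are padded to max_seq_length; features are cached on disk under a FileLock.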
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # Move a random element to the end; note that _in_place_partition picks
        # its own random pivot again, so this first swap only reshuffles the slice.
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
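# Added usage sketch on a small fixed list (the comparison count varies with
# the random pivot choices, so only the sorted order is deterministic):
demo = [3, 1, 4, 1, 5, 9, 2, 6]
demo_count = _in_place_quick_sort(demo, 0, len(demo) - 1)
print(demo)        # [1, 1, 2, 3, 4, 5, 6, 9]
print(demo_count)  # number of comparisons; differs run to run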
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between the curve and the x axis on
    [x_start, x_end] with the trapezoidal rule, using `steps` segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: int | float) -> int | float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
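# Added worked check: because the loop uses abs(fx2 + fx1), the function
# approximates the total (unsigned) area between the curve and the x axis.
# For f(x) = x^3 + x^2 on [-5, 5], the curve is negative on [-5, -1], so
#   area = 1376/12 + 2376/12 = 3752/12 ~ 312.67,
# and the printed values should converge to about 312.67 rather than to the
# signed integral, which is 250/3 ~ 83.33 (the odd x^3 term cancels).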
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available UpperCamelCase__ = { """configuration_audio_spectrogram_transformer""": [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ASTConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ASTForAudioClassification""", """ASTModel""", """ASTPreTrainedModel""", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ["""ASTFeatureExtractor"""] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import datasets from .evaluate import evaluate UpperCamelCase__ = """\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } """ UpperCamelCase__ = """ This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. """ UpperCamelCase__ = """ Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': the text of the answer references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the SQuAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}] >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] >>> squad_metric = datasets.load_metric(\"squad\") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )}, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} __lowerCAmelCase = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] __lowerCAmelCase = evaluate(dataset=_A , predictions=_A ) return score
import argparse import os import re import packaging.version UpperCamelCase__ = """examples/""" UpperCamelCase__ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCamelCase__ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCamelCase__ = """README.md""" def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern] __lowerCAmelCase = replace.replace("VERSION" , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = re_pattern.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , pattern="examples" ) def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not patch: update_version_in_examples(SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = "🤗 Transformers currently provides the following architectures" __lowerCAmelCase = "1. Want to contribute a new model?" with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.readlines() # Find the start of the list. __lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): __lowerCAmelCase = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , ) index += 1 with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(SCREAMING_SNAKE_CASE_ ) def _a ( ): with open(REPLACE_FILES["init"] , "r" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE_ ).groups()[0] return packaging.version.parse(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any]=False ): __lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" 
) if default_version.is_devrelease: __lowerCAmelCase = default_version.base_version elif patch: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __lowerCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = default_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ , patch=SCREAMING_SNAKE_CASE_ ) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() def _a ( ): __lowerCAmelCase = get_version() __lowerCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __lowerCAmelCase = current_version.base_version # Check with the user we got that right. __lowerCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = dev_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ ) print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCamelCase__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
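# Added usage note (hypothetical invocation; the file's location in the repo is
# not shown above):
#   python release.py                 # prompts for the release version, updates files
#   python release.py --patch         # same, but as a patch release
#   python release.py --post_release  # bump to the next `.dev0` development version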
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) UpperCamelCase__ = logging.getLogger() def _a ( SCREAMING_SNAKE_CASE_ : Tuple ): __lowerCAmelCase = {} __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , "all_results.json" ) if os.path.exists(SCREAMING_SNAKE_CASE_ ): with open(SCREAMING_SNAKE_CASE_ , "r" ) as f: __lowerCAmelCase = json.load(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F"""can't find {path}""" ) return results UpperCamelCase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class a__ ( snake_case__ ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" import xla_spawn __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = f""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(_A , "argv" , _A ): __lowerCAmelCase = time() xla_spawn.main() __lowerCAmelCase = time() __lowerCAmelCase = get_results(_A ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 5_0_0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" import xla_spawn __lowerCAmelCase = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split() with patch.object(_A , "argv" , _A ): xla_spawn.main()
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a__ ( snake_case__ , unittest.TestCase ): _a : Dict = KandinskyImgaImgPipeline _a : List[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] _a : str = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] _a : List[Any] = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _a : int = False @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim * 4 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 1_0_0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , ) __lowerCAmelCase = MultilingualCLIP(_A ) __lowerCAmelCase = text_encoder.eval() return text_encoder @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**_A ) return model @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } 
@property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1_0_0_0, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**_A ) __lowerCAmelCase = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A ) __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __lowerCAmelCase = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 1_0, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**_A ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowerCAmelCase = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = "A red cartoon frog, 4k" __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(_A ) __lowerCAmelCase = 
KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( _A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_A , _A )
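# Added usage sketch, mirroring the slow test above (assumes a GPU and the
# public kandinsky-community checkpoints; `KandinskyImgaImgPipeline` appears to
# be the obfuscated name of diffusers' KandinskyImg2ImgPipeline):
#
# prior = KandinskyPriorPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
# ).to("cuda")
# pipe = KandinskyImgaImgPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
# ).to("cuda")
# image_embeds, negative_embeds = prior("A red cartoon frog, 4k").to_tuple()
# frog = pipe(
#     "A red cartoon frog, 4k", image=init_image, image_embeds=image_embeds,
#     negative_image_embeds=negative_embeds, strength=0.2, output_type="np",
# ).images[0]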
from jiwer import compute_measures import datasets UpperCamelCase__ = """\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } """ UpperCamelCase__ = """\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. """ UpperCamelCase__ = """ Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. Returns: (float): the word error rate Examples: >>> predictions = [\"this is the prediction\", \"there is an other sample\"] >>> references = [\"this is the reference\", \"there is another one\"] >>> wer = datasets.load_metric(\"wer\") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def __SCREAMING_SNAKE_CASE( self , _A=None , _A=None , _A=False ): """simple docstring""" if concatenate_texts: return compute_measures(_A , _A )["wer"] else: __lowerCAmelCase = 0 __lowerCAmelCase = 0 for prediction, reference in zip(_A , _A ): __lowerCAmelCase = compute_measures(_A , _A ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
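# Added worked example for the docstring numbers above:
#   "this is the reference"  vs "this is the prediction"  -> 1 substitution over 4 reference words
#   "there is another one"   vs "there is an other sample" -> 3 edits (2 substitutions + 1 insertion) over 4 reference words
# The iterative branch accumulates errors and reference words across pairs, so
#   WER = (1 + 3) / (4 + 4) = 0.5,
# matching the 0.5 shown in the Examples section.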
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 is highest); each level holds at most 100 items."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 100:
                # Note: this raises the builtin OverflowError, unlike
                # ElementPriorityQueue, which raises the custom OverFlowError above.
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        # Serve the highest (lowest-numbered) non-empty priority first.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The element value is its priority: dequeue always removes the minimum (O(n))."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self):
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {"""vocab_file""": """spiece.model"""} UpperCamelCase__ = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", } } UpperCamelCase__ = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } # Segments (not really needed) UpperCamelCase__ = 0 UpperCamelCase__ = 1 UpperCamelCase__ = 2 UpperCamelCase__ = 3 UpperCamelCase__ = 4 class a__ ( snake_case__ ): _a : Tuple = VOCAB_FILES_NAMES _a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _a : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Optional[Any] = """left""" def __init__( self , _A , _A=False , _A=True , _A=False , _A="<s>" , _A="</s>" , _A="<unk>" , _A="<sep>" , _A="<pad>" , _A="<cls>" , _A="<mask>" , _A=["<eop>", "<eod>"] , _A = None , **_A , ): """simple docstring""" __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token __lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) __lowerCAmelCase = 3 __lowerCAmelCase = do_lower_case __lowerCAmelCase = remove_space __lowerCAmelCase = keep_accents __lowerCAmelCase = vocab_file __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_A ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return len(self.sp_model ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __lowerCAmelCase = self.__dict__.copy() __lowerCAmelCase = None return state def __setstate__( self , _A ): """simple docstring""" __lowerCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __lowerCAmelCase = {} __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" if self.remove_space: __lowerCAmelCase = " ".join(inputs.strip().split() ) else: __lowerCAmelCase = inputs __lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: __lowerCAmelCase = unicodedata.normalize("NFKD" , _A ) __lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_A )] ) if self.do_lower_case: __lowerCAmelCase = outputs.lower() return outputs def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = self.preprocess_text(_A ) __lowerCAmelCase = self.sp_model.encode(_A , out_type=_A ) __lowerCAmelCase = [] for piece in pieces: if len(_A ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): __lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: 
__lowerCAmelCase = cur_pieces[1:] else: __lowerCAmelCase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_A ) else: new_pieces.append(_A ) return new_pieces def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.sp_model.PieceToId(_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.sp_model.IdToPiece(_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "".join(_A ).replace(_A , " " ).strip() return out_string def __SCREAMING_SNAKE_CASE( self , _A , _A = False , _A = None , _A = True , **_A , ): """simple docstring""" __lowerCAmelCase = kwargs.pop("use_source_tokenizer" , _A ) __lowerCAmelCase = self.convert_ids_to_tokens(_A , skip_special_tokens=_A ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 __lowerCAmelCase = [] __lowerCAmelCase = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A ) ) __lowerCAmelCase = [] sub_texts.append(_A ) else: current_sub_text.append(_A ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens __lowerCAmelCase = "".join(_A ) __lowerCAmelCase = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __lowerCAmelCase = self.clean_up_tokenization(_A ) return clean_text else: return text def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is not None: return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1] return ([0] * len(_A )) + [1, 1] def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , "wb" ) as fi: __lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
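# A minimal usage sketch for the tokenizer above. `XLNetTokenizer` is the
# upstream transformers name for this (here obfuscated) class, and downloading
# the pretrained spiece.model requires network access -- both are assumptions.
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
encoded = tokenizer("Hello world")["input_ids"]
# XLNet appends <sep> <cls> at the end and pads on the left (padding_side = "left").
print(tokenizer.convert_ids_to_tokens(encoded))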
92
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a__ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = scope __lowerCAmelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = num_patches + 2 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTModel(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTForMaskedImageModeling(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, 
self.image_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForMaskedImageModeling(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = self.type_sequence_label_size __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a__ ( snake_case__ , snake_case__ , unittest.TestCase ): _a : Optional[Any] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _a : int = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _a : Optional[Any] = False _a : Tuple = False _a : Tuple = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def __SCREAMING_SNAKE_CASE( 
self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ): """simple docstring""" __lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.model_tester.is_training: return __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_A ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __lowerCAmelCase = False __lowerCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue __lowerCAmelCase = model_class(_A ) model.gradient_checkpointing_enable() model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_A ), *get_values(_A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): __lowerCAmelCase = problem_type["title"] __lowerCAmelCase = problem_type["num_labels"] __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) if problem_type["num_labels"] > 1: __lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) __lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_A ) as warning_list: __lowerCAmelCase = model(**_A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = DeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _a ( ): __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( _A ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ).to(_A ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**_A ) # verify the logits __lowerCAmelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) __lowerCAmelCase = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ) __lowerCAmelCase = inputs.pixel_values.to(_A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __lowerCAmelCase = model(_A )
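# Hedged sketch of the inference path the integration test above exercises,
# using the same checkpoint and fixture image; requires network access and the
# test fixture to exist locally.
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained(
    "facebook/deit-base-distilled-patch16-224"
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])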
92
1
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
92
def solution(limit: int = 1_000_000) -> int:
    # phi[i] will hold Euler's totient of i; start from i - 1 and sieve primes.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
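# Quick sanity check (not in the original file): Project Euler 72 states there
# are 21 reduced proper fractions for denominators d <= 8, which the sieve
# above reproduces.
assert solution(8) == 21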
92
1
import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): UpperCamelCase__ = True from torch.cuda.amp import autocast UpperCamelCase__ = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : List[str]=None ): return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ ) @dataclass class a__ : _a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) _a : Optional[bool] = field( default=snake_case__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) _a : Optional[float] = field( default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} ) _a : Optional[float] = field( default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} ) _a : Optional[float] = field( default=0.1 , metadata={ """help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.""" } , ) _a : Optional[float] = field( default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , ) _a : Optional[float] = field( default=0.05 , metadata={ """help""": ( """Propability of each feature vector along the time axis to be chosen as the start of the vector""" """span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature""" """vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.""" ) } , ) _a : Optional[float] = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} ) @dataclass class a__ : _a : Optional[str] = field( default=snake_case__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) _a : Optional[str] = field( default="""train+validation""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). 
Defaults to 'train'""" } , ) _a : bool = field( default=snake_case__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) _a : Optional[int] = field( default=snake_case__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) _a : Optional[int] = field( default=snake_case__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _a : Optional[int] = field( default=snake_case__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of validation examples to this """ """value if set.""" ) } , ) _a : List[str] = list_field( default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , ) @dataclass class a__ : _a : WavaVecaProcessor _a : Union[bool, str] = True _a : Optional[int] = None _a : Optional[int] = None _a : Optional[int] = None _a : Optional[int] = None def __call__( self , _A ): """simple docstring""" __lowerCAmelCase = [{"input_values": feature["input_values"]} for feature in features] __lowerCAmelCase = [{"input_ids": feature["labels"]} for feature in features] __lowerCAmelCase = self.processor.pad( _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) __lowerCAmelCase = self.processor.pad( labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , ) # replace padding with -100 to ignore loss correctly __lowerCAmelCase = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 ) __lowerCAmelCase = labels return batch class a__ ( snake_case__ ): def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" model.train() __lowerCAmelCase = self._prepare_inputs(_A ) if self.use_amp: with autocast(): __lowerCAmelCase = self.compute_loss(_A , _A ) else: __lowerCAmelCase = self.compute_loss(_A , _A ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": __lowerCAmelCase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __lowerCAmelCase = loss.sum() / (inputs["labels"] >= 0).sum() else: raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" ) if self.args.gradient_accumulation_steps > 1: __lowerCAmelCase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(_A ).backward() elif self.use_apex: with amp.scale_loss(_A , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(_A ) else: loss.backward() return loss.detach() def _a ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE_ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: __lowerCAmelCase = datasets.load_dataset( "common_voice" , data_args.dataset_config_name , split=data_args.train_split_name ) __lowerCAmelCase = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" ) # Create and save tokenizer __lowerCAmelCase = F"""[{''.join(data_args.chars_to_ignore )}]""" def remove_special_characters(SCREAMING_SNAKE_CASE_ : int ): __lowerCAmelCase = re.sub(SCREAMING_SNAKE_CASE_ , "" , batch["sentence"] ).lower() + " " return batch __lowerCAmelCase = train_dataset.map(SCREAMING_SNAKE_CASE_ , remove_columns=["sentence"] ) __lowerCAmelCase = eval_dataset.map(SCREAMING_SNAKE_CASE_ , remove_columns=["sentence"] ) def extract_all_chars(SCREAMING_SNAKE_CASE_ : Tuple ): __lowerCAmelCase = " ".join(batch["text"] ) __lowerCAmelCase = list(set(SCREAMING_SNAKE_CASE_ ) ) return {"vocab": [vocab], "all_text": [all_text]} __lowerCAmelCase = train_dataset.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , ) __lowerCAmelCase = train_dataset.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , ) __lowerCAmelCase = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) ) __lowerCAmelCase = {v: k for k, v in enumerate(SCREAMING_SNAKE_CASE_ )} __lowerCAmelCase = vocab_dict[" "] del vocab_dict[" "] __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) with open("vocab.json" , "w" ) as vocab_file: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Load pretrained model and tokenizer # # Distributed training: # The 
.from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCAmelCase = WavaVecaCTCTokenizer( "vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , ) __lowerCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , ) if data_args.max_train_samples is not None: __lowerCAmelCase = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples ) __lowerCAmelCase = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) ) if data_args.max_val_samples is not None: __lowerCAmelCase = eval_dataset.select(range(data_args.max_val_samples ) ) __lowerCAmelCase = torchaudio.transforms.Resample(4_80_00 , 1_60_00 ) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. def speech_file_to_array_fn(SCREAMING_SNAKE_CASE_ : str ): __lowerCAmelCase , __lowerCAmelCase = torchaudio.load(batch["path"] ) __lowerCAmelCase = resampler(SCREAMING_SNAKE_CASE_ ).squeeze().numpy() __lowerCAmelCase = 1_60_00 __lowerCAmelCase = batch["text"] return batch __lowerCAmelCase = train_dataset.map( SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) __lowerCAmelCase = eval_dataset.map( SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(SCREAMING_SNAKE_CASE_ : Tuple ): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"] ) ) == 1 ), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.""" __lowerCAmelCase = processor( audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] ) batch.update(SCREAMING_SNAKE_CASE_ ) return batch __lowerCAmelCase = train_dataset.map( SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , ) __lowerCAmelCase = eval_dataset.map( SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , ) # Metric __lowerCAmelCase = datasets.load_metric("wer" ) def compute_metrics(SCREAMING_SNAKE_CASE_ : Tuple ): __lowerCAmelCase = pred.predictions __lowerCAmelCase = np.argmax(SCREAMING_SNAKE_CASE_ , axis=-1 ) __lowerCAmelCase = processor.tokenizer.pad_token_id __lowerCAmelCase = processor.batch_decode(SCREAMING_SNAKE_CASE_ ) # we do not want to group tokens when computing the metrics __lowerCAmelCase = processor.batch_decode(pred.label_ids , 
group_tokens=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = wer_metric.compute(predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ ) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator __lowerCAmelCase = DataCollatorCTCWithPadding(processor=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) # Initialize our Trainer __lowerCAmelCase = CTCTrainer( model=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # Training if training_args.do_train: if last_checkpoint is not None: __lowerCAmelCase = last_checkpoint elif os.path.isdir(model_args.model_name_or_path ): __lowerCAmelCase = model_args.model_name_or_path else: __lowerCAmelCase = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank ): processor.save_pretrained(training_args.output_dir ) __lowerCAmelCase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ ) trainer.save_model() __lowerCAmelCase = train_result.metrics __lowerCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ ) ) __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ ) trainer.save_state() # Evaluation __lowerCAmelCase = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) __lowerCAmelCase = trainer.evaluate() __lowerCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ ) return results if __name__ == "__main__": main()
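# Standalone sketch of the label-padding step in the collator above: positions
# that the tokenizer padded out are replaced with -100 so the CTC loss ignores
# them. The tensors below are made-up toy values.
import torch

labels = torch.tensor([[5, 9, 2, 0, 0]])           # 0 = pad token id
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
labels = labels.masked_fill(attention_mask.ne(1), -100)
print(labels)  # tensor([[   5,    9,    2, -100, -100]])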
92
import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( """The `image_to_image.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionImg2ImgPipeline` instead.""" )
92
1
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot chains its values in a deque; newest entries go to the front.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
92
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType UpperCamelCase__ = get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if accelerator.process_index == 0: logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {ckpt_dir}""" ) __lowerCAmelCase = {"model": state_dict} dist_cp.save_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Model saved to {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = 
torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) if F"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading model from {ckpt_dir}""" ) __lowerCAmelCase = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , ) __lowerCAmelCase = state_dict["model"] logger.info(F"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state saved in {output_optimizer_file}""" ) else: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Optimizer state saved in {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = 
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" ) else: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if F"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading Optimizer from {ckpt_dir}""" ) __lowerCAmelCase = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , ) __lowerCAmelCase = optim_state["optimizer"] logger.info(F"""Optimizer loaded from {ckpt_dir}""" ) __lowerCAmelCase = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
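# Hedged sketch of how these helpers are normally reached in accelerate: users
# call Accelerator.save_state/load_state with an FSDP plugin rather than the
# helpers directly. Requires a distributed FSDP launch; the plugin kwarg below
# is an assumption based on accelerate's public API.
import torch
from accelerate import Accelerator, FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_type="SHARDED_STATE_DICT")
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
model = accelerator.prepare(torch.nn.Linear(8, 8))
accelerator.save_state("ckpt")   # routes to the model/optimizer save helpers above
accelerator.load_state("ckpt")   # routes to the matching load helpers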
92
1
from ..utils import DummyObject, requires_backends class a__ ( metaclass=snake_case__ ): _a : Any = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : int = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : Union[str, Any] = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : int = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : int = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : Tuple = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : List[Any] = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : int = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : Tuple = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , 
**_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : Optional[int] = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : str = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : str = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) class a__ ( metaclass=snake_case__ ): _a : List[str] = ["""flax"""] def __init__( self , *_A , **_A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def __SCREAMING_SNAKE_CASE( cls , *_A , **_A ): """simple docstring""" requires_backends(cls , ["flax"] )
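# Illustrative-only sketch of what the dummy pattern above does: when flax is
# not installed, merely constructing one of these placeholders raises an
# ImportError that names the missing backend. The class name is hypothetical.
from diffusers.utils import DummyObject, requires_backends

class FlaxOnlyPlaceholder(metaclass=DummyObject):  # hypothetical name
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

try:
    FlaxOnlyPlaceholder()
except ImportError as err:  # raised only if flax is absent
    print(err)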
92
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class a__ ( snake_case__ ): def __init__( self , *_A , _A=None , _A=None , **_A ): """simple docstring""" super().__init__(*_A , **_A ) __lowerCAmelCase = eval_examples __lowerCAmelCase = post_process_function def __SCREAMING_SNAKE_CASE( self , _A = None , _A=None , _A = None , _A = "eval" , **_A , ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) __lowerCAmelCase = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) __lowerCAmelCase = gen_kwargs __lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset __lowerCAmelCase = self.get_eval_dataloader(_A ) __lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __lowerCAmelCase = self.post_process_function(_A , _A , _A ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) else: __lowerCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_A ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A ) return metrics def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A = "test" , **_A ): """simple docstring""" __lowerCAmelCase = gen_kwargs.copy() __lowerCAmelCase = self.get_test_dataloader(_A ) # Temporarily disable metric computation, we will do it in the loop here. 
__lowerCAmelCase = self.compute_metrics __lowerCAmelCase = None __lowerCAmelCase = time.time() __lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowerCAmelCase = eval_loop( _A , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , ) finally: __lowerCAmelCase = compute_metrics __lowerCAmelCase = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output __lowerCAmelCase = self.post_process_function(_A , _A , _A , "predict" ) __lowerCAmelCase = self.compute_metrics(_A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): __lowerCAmelCase = metrics.pop(_A ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
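# Minimal, self-contained sketch of the metric-prefixing loop that both
# evaluate() and predict() above apply; the sample numbers are made up.
def prefix_metrics(metrics: dict, metric_key_prefix: str = "eval") -> dict:
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
    return metrics

print(prefix_metrics({"exact_match": 81.2, "f1": 88.9}))
# {'eval_exact_match': 81.2, 'eval_f1': 88.9}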
92
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class a__ ( snake_case__ ): _a : List[Any] = """altclip_text_model""" def __init__( self , _A=2_5_0_0_0_2 , _A=1_0_2_4 , _A=2_4 , _A=1_6 , _A=4_0_9_6 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_4 , _A=1 , _A=0.02 , _A=0.02 , _A=1E-0_5 , _A=1 , _A=0 , _A=2 , _A="absolute" , _A=True , _A=7_6_8 , **_A , ): """simple docstring""" super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = initializer_factor __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = project_dim class a__ ( snake_case__ ): _a : List[Any] = """altclip_vision_model""" def __init__( self , _A=7_6_8 , _A=3_0_7_2 , _A=5_1_2 , _A=1_2 , _A=1_2 , _A=3 , _A=2_2_4 , _A=3_2 , _A="quick_gelu" , _A=1E-5 , _A=0.0 , _A=0.02 , _A=1.0 , **_A , ): """simple docstring""" super().__init__(**_A ) __lowerCAmelCase = hidden_size __lowerCAmelCase = intermediate_size __lowerCAmelCase = projection_dim __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = num_channels __lowerCAmelCase = patch_size __lowerCAmelCase = image_size __lowerCAmelCase = initializer_range __lowerCAmelCase = initializer_factor __lowerCAmelCase = attention_dropout __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = hidden_act @classmethod def __SCREAMING_SNAKE_CASE( cls , _A , **_A ): """simple docstring""" cls._set_token_in_kwargs(_A ) __lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(_A , **_A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("model_type" ) == "altclip": __lowerCAmelCase = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_A , **_A ) class a__ ( snake_case__ ): _a : Any = """altclip""" _a : Optional[int] = True def __init__( self , _A=None , _A=None , _A=7_6_8 , _A=2.65_92 , **_A ): """simple docstring""" __lowerCAmelCase = kwargs.pop("text_config_dict" , _A ) __lowerCAmelCase = kwargs.pop("vision_config_dict" , _A ) super().__init__(**_A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. 
if text_config_dict is not None: if text_config is None: __lowerCAmelCase = {} # This is the complete result when using `text_config_dict`. __lowerCAmelCase = AltCLIPTextConfig(**_A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __lowerCAmelCase = ( f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """ f"""The value `text_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: __lowerCAmelCase = ( f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """ f"""value `text_config[\"{key}\"]` will be overriden.""" ) logger.warning(_A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __lowerCAmelCase = {} # This is the complete result when using `vision_config_dict`. __lowerCAmelCase = AltCLIPVisionConfig(**_A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __lowerCAmelCase = { str(_A ): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __lowerCAmelCase = ( f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """ f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: __lowerCAmelCase = ( f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """ f"""The value `vision_config[\"{key}\"]` will be overriden.""" ) logger.warning(_A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __lowerCAmelCase = {} logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." ) if vision_config is None: __lowerCAmelCase = {} logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." ) __lowerCAmelCase = AltCLIPTextConfig(**_A ) __lowerCAmelCase = AltCLIPVisionConfig(**_A ) __lowerCAmelCase = projection_dim __lowerCAmelCase = logit_scale_init_value __lowerCAmelCase = 1.0 @classmethod def __SCREAMING_SNAKE_CASE( cls , _A , _A , **_A ): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = copy.deepcopy(self.__dict__ ) __lowerCAmelCase = self.text_config.to_dict() __lowerCAmelCase = self.vision_config.to_dict() __lowerCAmelCase = self.__class__.model_type return output
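# A minimal sketch of composing the two sub-configs defined above; it assumes
# the upstream transformers class names (AltCLIPTextConfig, AltCLIPVisionConfig,
# AltCLIPConfig) for what this file obfuscates.
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig(num_hidden_layers=2)
vision_config = AltCLIPVisionConfig(num_hidden_layers=2)
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)  # 768 by default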
92
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] ): __lowerCAmelCase = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() ) __lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] ) return params UpperCamelCase__ = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ): if metric == "rouge2": __lowerCAmelCase = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": __lowerCAmelCase = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": __lowerCAmelCase = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" " function." ) __lowerCAmelCase = ModelCheckpoint( dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): return EarlyStopping( monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , ) class a__ ( pl.Callback ): def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_A ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A=True ): """simple docstring""" logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) __lowerCAmelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results __lowerCAmelCase = Path(pl_module.hparams.output_dir ) if type_path == "test": __lowerCAmelCase = od / "test_results.txt" __lowerCAmelCase = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__lowerCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" __lowerCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=_A ) generations_file.parent.mkdir(exist_ok=_A ) with open(_A , "a+" ) as writer: for key in sorted(_A ): if key in ["log", "progress_bar", "preds"]: continue __lowerCAmelCase = metrics[key] if isinstance(_A , torch.Tensor ): __lowerCAmelCase = val.item() __lowerCAmelCase = f"""{key}: {val:.6f}\n""" writer.write(_A ) if not save_generations: return if "preds" in metrics: __lowerCAmelCase = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(_A ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" try: __lowerCAmelCase = pl_module.model.model.num_parameters() except AttributeError: __lowerCAmelCase = pl_module.model.num_parameters() __lowerCAmelCase = count_trainable_parameters(_A ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_A , _A , "test" ) @rank_zero_only def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
92
1
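# Usage sketch for the composite CLIP-style config in the record above. This is
# a minimal, hedged example assuming the obfuscated classes (`a__`) correspond to
# transformers' AltCLIPTextConfig / AltCLIPVisionConfig / AltCLIPConfig, which
# the checkpoint URLs in the record suggest; `from_text_vision_configs` mirrors
# the classmethod defined at the end of the record.
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig(hidden_size=1024, num_hidden_layers=24)
vision_config = AltCLIPVisionConfig(hidden_size=768, image_size=224)

# Compose the two sub-configs into one AltCLIP config.
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
assert config.projection_dim == 768  # default shown in the record's signature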
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any ): return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 ): return sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[column] ) def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=float("inf" ) ): for i in range(points_counts - 1 ): for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __lowerCAmelCase = current_dis return min_dis def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=float("inf" ) ): for i in range(min(6 , points_counts - 1 ) , SCREAMING_SNAKE_CASE_ ): for j in range(max(0 , i - 6 ) , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __lowerCAmelCase = current_dis return min_dis def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): # base case if points_counts <= 3: return dis_between_closest_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # recursion __lowerCAmelCase = points_counts // 2 __lowerCAmelCase = closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE_ , points_sorted_on_y[:mid] , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE_ , points_sorted_on_y[mid:] , points_counts - mid ) __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = dis_between_closest_in_strip( SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) return min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int ): __lowerCAmelCase = column_based_sort(SCREAMING_SNAKE_CASE_ , column=0 ) __lowerCAmelCase = column_based_sort(SCREAMING_SNAKE_CASE_ , column=1 ) return ( closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ** 0.5 if __name__ == "__main__": UpperCamelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print("""Distance:""", closest_pair_of_points(points, len(points)))
92
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
92
1
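# Quick check of the divide-and-conquer closest-pair routine from the record
# above, run in that record's module scope; the entry point name comes from its
# own __main__ block.
points = [(0, 0), (3, 4), (1, 1)]
print(closest_pair_of_points(points, len(points)))  # ~1.4142, i.e. the (0,0)-(1,1) pair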
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } UpperCamelCase__ = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } UpperCamelCase__ = {"""facebook/blenderbot-3B""": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _a ( ): __lowerCAmelCase = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __lowerCAmelCase = bs[:] __lowerCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(SCREAMING_SNAKE_CASE_ ) cs.append(2**8 + n ) n += 1 __lowerCAmelCase = [chr(SCREAMING_SNAKE_CASE_ ) for n in cs] return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ): __lowerCAmelCase = set() __lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCAmelCase = char return pairs class a__ ( snake_case__ ): _a : str = VOCAB_FILES_NAMES _a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Union[str, Any] = ["""input_ids""", """attention_mask"""] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): """simple docstring""" __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding="utf-8" ) as vocab_handle: __lowerCAmelCase = json.load(_A ) __lowerCAmelCase = {v: k for k, v in self.encoder.items()} __lowerCAmelCase = errors # how to handle errors in decoding __lowerCAmelCase = bytes_to_unicode() __lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding="utf-8" ) as merges_handle: __lowerCAmelCase = merges_handle.read().split("\n" )[1:-1] __lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __lowerCAmelCase = dict(zip(_A , range(len(_A ) ) ) ) __lowerCAmelCase = {} __lowerCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowerCAmelCase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return len(self.encoder ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" if token in self.cache: return self.cache[token] __lowerCAmelCase = tuple(_A ) __lowerCAmelCase = get_pairs(_A ) if not pairs: return token while True: __lowerCAmelCase = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowerCAmelCase , __lowerCAmelCase = bigram __lowerCAmelCase = [] __lowerCAmelCase = 0 while i < len(_A ): try: __lowerCAmelCase = word.index(_A , _A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCAmelCase = j if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCAmelCase = tuple(_A ) __lowerCAmelCase = new_word if len(_A ) == 1: break else: __lowerCAmelCase = get_pairs(_A ) __lowerCAmelCase = " ".join(_A ) __lowerCAmelCase = word return word def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = [] for token in re.findall(self.pat , _A ): __lowerCAmelCase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) ) return bpe_tokens def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.decoder.get(_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "".join(_A ) __lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowerCAmelCase = 
os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + "\n" ) __lowerCAmelCase = 0 with open(_A , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowerCAmelCase = token_index writer.write(" ".join(bpe_tokens ) + "\n" ) index += 1 return vocab_file, merge_file def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return [1] + ([0] * len(_A )) + [1] return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1] def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __SCREAMING_SNAKE_CASE( self , _A , _A=False , **_A ): """simple docstring""" __lowerCAmelCase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()): __lowerCAmelCase = " " + text return (text, kwargs) def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" return token_ids_a + [self.eos_token_id] def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(_A ) __lowerCAmelCase = " ".join(_A ) __lowerCAmelCase = self.encode(_A ) if len(_A ) > self.model_max_length: __lowerCAmelCase = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
92
from queue import PriorityQueue from typing import Any import numpy as np def _a ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : PriorityQueue , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue __lowerCAmelCase = cst_fwd.get(SCREAMING_SNAKE_CASE_ , np.inf ) __lowerCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) __lowerCAmelCase = new_cost_f __lowerCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: __lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict ): __lowerCAmelCase = -1 __lowerCAmelCase = set() __lowerCAmelCase = set() __lowerCAmelCase = {source: 0} __lowerCAmelCase = {destination: 0} __lowerCAmelCase = {source: None} __lowerCAmelCase = {destination: None} __lowerCAmelCase = PriorityQueue() __lowerCAmelCase = PriorityQueue() __lowerCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): __lowerCAmelCase , __lowerCAmelCase = queue_forward.get() visited_forward.add(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase = queue_backward.get() visited_backward.add(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = pass_and_relaxation( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) __lowerCAmelCase = pass_and_relaxation( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: __lowerCAmelCase = shortest_distance return shortest_path_distance UpperCamelCase__ = { """B""": [["""C""", 1]], """C""": [["""D""", 1]], """D""": [["""F""", 1]], """E""": [["""B""", 1], ["""G""", 2]], """F""": [], """G""": [["""F""", 1]], } UpperCamelCase__ = { """B""": [["""E""", 1]], """C""": [["""B""", 1]], """D""": [["""C""", 1]], """F""": [["""D""", 1], ["""G""", 1]], """E""": [[None, np.inf]], """G""": [["""E""", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
92
1
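# Hedged usage sketch for the BPE tokenizer in the record above, assuming it is
# transformers' BlenderbotTokenizer (the vocab/merges URLs in the record point
# at facebook/blenderbot-3B). Note that build_inputs_with_special_tokens only
# appends the EOS token; it does not prepend a BOS token.
from transformers import BlenderbotTokenizer

tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
ids = tok("Hello, how are you?").input_ids
assert ids[-1] == tok.eos_token_id  # single trailing </s>, per the record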
def _a ( SCREAMING_SNAKE_CASE_ : int = 1_00_00_00 ): __lowerCAmelCase = set(range(3 , SCREAMING_SNAKE_CASE_ , 2 ) ) primes.add(2 ) for p in range(3 , SCREAMING_SNAKE_CASE_ , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) __lowerCAmelCase = [float(SCREAMING_SNAKE_CASE_ ) for n in range(limit + 1 )] for p in primes: for n in range(SCREAMING_SNAKE_CASE_ , limit + 1 , SCREAMING_SNAKE_CASE_ ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(f'''{solution() = }''')
92
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class a__ ( snake_case__ ): _a : Optional[int] = """decision_transformer""" _a : Optional[int] = ["""past_key_values"""] _a : Dict = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , _A=1_7 , _A=4 , _A=1_2_8 , _A=4_0_9_6 , _A=True , _A=1 , _A=1_0_2_4 , _A=3 , _A=1 , _A=None , _A="relu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=1E-5 , _A=0.02 , _A=True , _A=True , _A=5_0_2_5_6 , _A=5_0_2_5_6 , _A=False , _A=False , **_A , ): """simple docstring""" __lowerCAmelCase = state_dim __lowerCAmelCase = act_dim __lowerCAmelCase = hidden_size __lowerCAmelCase = max_ep_len __lowerCAmelCase = action_tanh __lowerCAmelCase = vocab_size __lowerCAmelCase = n_positions __lowerCAmelCase = n_layer __lowerCAmelCase = n_head __lowerCAmelCase = n_inner __lowerCAmelCase = activation_function __lowerCAmelCase = resid_pdrop __lowerCAmelCase = embd_pdrop __lowerCAmelCase = attn_pdrop __lowerCAmelCase = layer_norm_epsilon __lowerCAmelCase = initializer_range __lowerCAmelCase = scale_attn_weights __lowerCAmelCase = use_cache __lowerCAmelCase = scale_attn_by_inverse_layer_idx __lowerCAmelCase = reorder_and_upcast_attn __lowerCAmelCase = bos_token_id __lowerCAmelCase = eos_token_id super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
92
1
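# Minimal sketch instantiating the GPT-2-style config from the record above,
# assuming the obfuscated class maps onto transformers' DecisionTransformerConfig;
# the values below (state_dim=17, act_dim=4, hidden_size=128) are just the
# defaults visible in the record's signature.
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
model = DecisionTransformerModel(config)  # randomly initialized weights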
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = F"""Expected string as input, found {type(SCREAMING_SNAKE_CASE_ )}""" raise ValueError(SCREAMING_SNAKE_CASE_ ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = F"""Expected boolean as use_pascal parameter, found {type(SCREAMING_SNAKE_CASE_ )}""" raise ValueError(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = input_str.split("_" ) __lowerCAmelCase = 0 if use_pascal else 1 __lowerCAmelCase = words[start_index:] __lowerCAmelCase = [word[0].upper() + word[1:] for word in words_to_capitalize] __lowerCAmelCase = "" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
92
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): _a : str = StableUnCLIPPipeline _a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS _a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS _a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _a : Optional[Any] = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = 3_2 __lowerCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) __lowerCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , ) torch.manual_seed(0 ) __lowerCAmelCase = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) __lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A ) __lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , ) torch.manual_seed(0 ) __lowerCAmelCase = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , ) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL() __lowerCAmelCase = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": 
prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __lowerCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=_A ) @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" ) __lowerCAmelCase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCAmelCase = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) __lowerCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 1_0**9
92
1
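# Sanity check for the snake_case converter in the record above, run in that
# record's module scope where the function is defined as `_a` (upstream it is
# assumed to be called snake_to_camel_case). With use_pascal=True the first
# word is capitalized too.
assert _a("some_random_string") == "someRandomString"
assert _a("some_random_string", use_pascal=True) == "SomeRandomString"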
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class a__ ( unittest.TestCase , snake_case__ ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_tool("text-classification" ) self.tool.setup() __lowerCAmelCase = load_tool("text-classification" , remote=_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(_A , "positive" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.remote_tool("That's quite cool" , ["positive", "negative"] ) self.assertEqual(_A , "positive" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(_A , "positive" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] ) self.assertEqual(_A , "positive" )
92
from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCamelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
1
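# The lazy-module record above exposes only Wav2Vec2PhonemeCTCTokenizer. A
# hedged usage sketch follows; the espeak checkpoint name and the implicit
# dependency on the `phonemizer` backend are assumptions based on the usual
# wav2vec2-phoneme setup, not stated in the record.
from transformers import Wav2Vec2PhonemeCTCTokenizer

tok = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
print(tok("Hello world").input_ids)  # phonemizes the text before tokenizing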
def _a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): __lowerCAmelCase = set() # Replace all the whitespace in our sentence __lowerCAmelCase = input_str.replace(" " , "" ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE_ ) == 26 def _a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): __lowerCAmelCase = [False] * 26 for char in input_str: if char.islower(): __lowerCAmelCase = True elif char.isupper(): __lowerCAmelCase = True return all(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _a ( ): from timeit import timeit __lowerCAmelCase = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest" print(timeit("is_pangram()" , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit("is_pangram_faster()" , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit("is_pangram_fastest()" , setup=SCREAMING_SNAKE_CASE_ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
92
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : Optional[Any] = DebertaVaTokenizer _a : Optional[Any] = DebertaVaTokenizerFast _a : List[str] = True _a : Optional[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "this is a test" __lowerCAmelCase = "this is a test" return input_text, output_text def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(_A ) , 3_0_0_0_1 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = " \tHeLLo!how \n Are yoU? " __lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on __lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "This is a test" __lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] __lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] __lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A ) __lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) # fmt: off __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] __lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] __lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DebertaVaTokenizer(_A ) __lowerCAmelCase = tokenizer.encode("sequence builders" ) __lowerCAmelCase = tokenizer.encode("multi-sequence build" ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 
2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
92
1
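# Sanity checks for the pangram helpers in the record above. The upstream names
# (is_pangram, is_pangram_faster, is_pangram_fastest) are taken from the
# benchmark setup string inside the record itself; the defs are obfuscated to
# `_a`, so these lines assume the original, un-renamed module.
assert is_pangram("The quick brown fox jumps over the lazy dog")
assert not is_pangram("Hello world")
assert is_pangram_fastest("Pack my box with five dozen liquor jugs")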
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class a__ ( snake_case__ ): _a : Optional[Any] = """dpt""" def __init__( self , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.02 , _A=1E-1_2 , _A=3_8_4 , _A=1_6 , _A=3 , _A=False , _A=True , _A=[2, 5, 8, 1_1] , _A="project" , _A=[4, 2, 1, 0.5] , _A=[9_6, 1_9_2, 3_8_4, 7_6_8] , _A=2_5_6 , _A=-1 , _A=False , _A=True , _A=0.4 , _A=2_5_5 , _A=0.1 , _A=[1, 1_0_2_4, 2_4, 2_4] , _A=[0, 1] , _A=None , **_A , ): """simple docstring""" super().__init__(**_A ) __lowerCAmelCase = hidden_size __lowerCAmelCase = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("Initializing the config with a `BiT` backbone." ) __lowerCAmelCase = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } __lowerCAmelCase = BitConfig(**_A ) elif isinstance(_A , _A ): logger.info("Initializing the config with a `BiT` backbone." ) __lowerCAmelCase = BitConfig(**_A ) elif isinstance(_A , _A ): __lowerCAmelCase = backbone_config else: raise ValueError( f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" ) __lowerCAmelCase = backbone_featmap_shape __lowerCAmelCase = neck_ignore_stages if readout_type != "project": raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." ) else: __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = [] __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = qkv_bias __lowerCAmelCase = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" ) __lowerCAmelCase = readout_type __lowerCAmelCase = reassemble_factors __lowerCAmelCase = neck_hidden_sizes __lowerCAmelCase = fusion_hidden_size __lowerCAmelCase = head_in_index __lowerCAmelCase = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) __lowerCAmelCase = use_auxiliary_head __lowerCAmelCase = auxiliary_loss_weight __lowerCAmelCase = semantic_loss_ignore_index __lowerCAmelCase = semantic_classifier_dropout def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __lowerCAmelCase = self.backbone_config.to_dict() __lowerCAmelCase = self.__class__.model_type return output
92
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCamelCase__ = logging.get_logger(__name__) @dataclass class a__ ( snake_case__ ): _a : List[str] = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **_A ): """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __lowerCAmelCase = deprecated_arg[3:] __lowerCAmelCase = not kwargs.pop(_A ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) __lowerCAmelCase = kwargs.pop("tpu_name" , self.tpu_name ) __lowerCAmelCase = kwargs.pop("device_idx" , self.device_idx ) __lowerCAmelCase = kwargs.pop("eager_mode" , self.eager_mode ) __lowerCAmelCase = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**_A ) _a : str = field( default=snake_case__ , metadata={"""help""": """Name of TPU"""} , ) _a : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) _a : bool = field(default=snake_case__ , metadata={"""help""": """Benchmark models in eager model."""} ) _a : bool = field( default=snake_case__ , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) __lowerCAmelCase = None if self.tpu: try: if self.tpu_name: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __lowerCAmelCase = None return tpu @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.n_gpu > 0
92
1
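# Minimal sketch for the DPT config in the record above, assuming it maps onto
# transformers' DPTConfig/DPTModel. Leaving is_hybrid=False (the default shown
# in the record) takes the plain ViT backbone path rather than the BiT hybrid
# branch the record special-cases.
from transformers import DPTConfig, DPTModel

config = DPTConfig(image_size=384, patch_size=16, is_hybrid=False)
model = DPTModel(config)  # randomly initialized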
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") UpperCamelCase__ = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class a__ ( snake_case__ , unittest.TestCase ): _a : int = CamembertTokenizer _a : Dict = CamembertTokenizerFast _a : Tuple = True _a : List[Any] = True def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase = CamembertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "<pad>" __lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>NOTUSED" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(_A ) , 1_0_0_4 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = CamembertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) __lowerCAmelCase = "I was born in 92000, and this is falsé." __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.test_rust_tokenizer: return __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = "I was born in 92000, and this is falsé." 
__lowerCAmelCase = tokenizer.tokenize(_A ) __lowerCAmelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A ) __lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = tokenizer.encode(_A ) __lowerCAmelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {"input_ids": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. __lowerCAmelCase = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=_A , )
92
1
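# Hedged sketch for the SentencePiece tokenizer exercised by the test record
# above; camembert-base is the public checkpoint named in its integration test.
from transformers import CamembertTokenizer

tok = CamembertTokenizer.from_pretrained("camembert-base")
print(tok.tokenize("J'aime le camembert !"))  # French text, as in the tests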
def _a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int ): __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = [[0] * n for i in range(SCREAMING_SNAKE_CASE_ )] for i in range(SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = y_points[i] for i in range(2 , SCREAMING_SNAKE_CASE_ ): for j in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
92
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ): if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ): return x return (x, x) @require_tf class a__ : def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model} __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) __lowerCAmelCase = 
after_output[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = np.abs((a - b) ).max() self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_save_load(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_pretrained_model_and_inputs() __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A ) __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) __lowerCAmelCase = model_a(**_A ) __lowerCAmelCase = after_outputs[0].numpy() __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def 
__SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFViTModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFViTModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A ) __lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) __lowerCAmelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) __lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowerCAmelCase = to_atuple(vision_model.config.image_size ) __lowerCAmelCase = to_atuple(vision_model.config.patch_size ) __lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowerCAmelCase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFDeiTModel(_A , name="vision_model" ) __lowerCAmelCase = TFRobertaModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFDeiTModelTester(self ) __lowerCAmelCase = TFRobertaModelTester(self ) __lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 
vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class a__ ( snake_case__ , unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) __lowerCAmelCase = 1_3 __lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowerCAmelCase = random_attention_mask([batch_size, 4] ) __lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __SCREAMING_SNAKE_CASE( self , _A , _A ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModel(_A , name="vision_model" ) __lowerCAmelCase = TFBertModel(_A , name="text_model" ) return vision_model, text_model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFCLIPVisionModelTester(self ) __lowerCAmelCase = TFBertModelTester(self ) __lowerCAmelCase = clip_model_tester.prepare_config_and_inputs() __lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase = vision_config_and_inputs ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class a__ ( unittest.TestCase ): @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_A ) __lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __lowerCAmelCase = processor( text=["una foto di un gatto", "una foto di un cane"] , images=_A , padding=_A , return_tensors="np" ) __lowerCAmelCase = model(**_A ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowerCAmelCase = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCamelCase__ = logging.getLogger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): __lowerCAmelCase = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 ) return np.sum(outputs == labels ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): with open(SCREAMING_SNAKE_CASE_ , encoding="utf_8" ) as f: __lowerCAmelCase = csv.reader(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = [] next(SCREAMING_SNAKE_CASE_ ) # skip the first line for line in tqdm(SCREAMING_SNAKE_CASE_ ): output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ): __lowerCAmelCase = [] for dataset in encoded_datasets: __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) __lowerCAmelCase = np.zeros((n_batch, 2) , dtype=np.intaa ) __lowerCAmelCase = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa ) __lowerCAmelCase = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowerCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowerCAmelCase = with_conta __lowerCAmelCase = with_conta __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1 __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1 __lowerCAmelCase = with_conta __lowerCAmelCase = with_conta __lowerCAmelCase = mc_label __lowerCAmelCase = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) ) return tensor_datasets def _a ( ): __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE_ , default="openai-gpt" , help="pretrained model name" ) parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." ) parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." ) parser.add_argument( "--output_dir" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="The output directory where the model predictions and checkpoints will be written." 
, ) parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE_ , default="" ) parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE_ , default="" ) parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE_ , default=42 ) parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE_ , default=3 ) parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE_ , default=8 ) parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE_ , default=16 ) parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE_ , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE_ , default=1 ) parser.add_argument( "--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=( "If > 0: set total number of training steps to perform. Override num_train_epochs." ) , ) parser.add_argument( "--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , ) parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE_ , default=6.25E-5 ) parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE_ , help="Linear warmup over warmup_steps." ) parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE_ , default="warmup_linear" ) parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE_ , default=0.01 ) parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE_ , default=0.9 ) parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE_ , default=3_74 ) parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE_ , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE_ , default="" , help="Can be used for distant debugging." ) __lowerCAmelCase = parser.parse_args() print(SCREAMING_SNAKE_CASE_ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __lowerCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) __lowerCAmelCase = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True." 
) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __lowerCAmelCase = ["_start_", "_delimiter_", "_classify_"] __lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) ) model.to(SCREAMING_SNAKE_CASE_ ) # Load and encode the datasets def tokenize_and_encode(SCREAMING_SNAKE_CASE_ : Union[str, Any] ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return obj return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj] logger.info("Encoding dataset..." ) __lowerCAmelCase = load_rocstories_dataset(args.train_dataset ) __lowerCAmelCase = load_rocstories_dataset(args.eval_dataset ) __lowerCAmelCase = (train_dataset, eval_dataset) __lowerCAmelCase = tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) # Compute the max input length for the Transformer __lowerCAmelCase = model.config.n_positions // 2 - 2 __lowerCAmelCase = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __lowerCAmelCase = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase , __lowerCAmelCase = tensor_datasets[0], tensor_datasets[1] __lowerCAmelCase = TensorDataset(*SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = RandomSampler(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size ) __lowerCAmelCase = TensorDataset(*SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = SequentialSampler(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __lowerCAmelCase = args.max_steps __lowerCAmelCase = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1 else: __lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs __lowerCAmelCase = list(model.named_parameters() ) __lowerCAmelCase = ["bias", "LayerNorm.bias", "LayerNorm.weight"] __lowerCAmelCase = [ { "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], "weight_decay": args.weight_decay, }, {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0}, ] __lowerCAmelCase = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon ) __lowerCAmelCase = get_linear_schedule_with_warmup( SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ ) if args.do_train: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) 
, desc="Epoch" ): __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = tqdm(SCREAMING_SNAKE_CASE_ , desc="Training" ) for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = batch __lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __lowerCAmelCase = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __lowerCAmelCase = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __lowerCAmelCase = model.module if hasattr(SCREAMING_SNAKE_CASE_ , "module" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __lowerCAmelCase = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ ) torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ ) model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned __lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(SCREAMING_SNAKE_CASE_ ) if args.do_eval: model.eval() __lowerCAmelCase , __lowerCAmelCase = 0, 0 __lowerCAmelCase , __lowerCAmelCase = 0, 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc="Evaluating" ): __lowerCAmelCase = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = batch with torch.no_grad(): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model( SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = mc_logits.detach().cpu().numpy() __lowerCAmelCase = mc_labels.to("cpu" ).numpy() __lowerCAmelCase = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __lowerCAmelCase = eval_loss / nb_eval_steps __lowerCAmelCase = eval_accuracy / nb_eval_examples __lowerCAmelCase = tr_loss / nb_tr_steps if args.do_train else None __lowerCAmelCase = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss} __lowerCAmelCase = os.path.join(args.output_dir , "eval_results.txt" ) with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer: logger.info("***** Eval results *****" ) for key in sorted(result.keys() ): logger.info(" %s = %s" , SCREAMING_SNAKE_CASE_ , str(result[key] ) ) writer.write("%s = %s\n" % (key, str(result[key] )) ) if __name__ == "__main__": main()
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {"""vocab_file""": """spm_char.model"""} UpperCamelCase__ = { """vocab_file""": { """microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""", """microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""", """microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""", } } UpperCamelCase__ = { """microsoft/speecht5_asr""": 1024, """microsoft/speecht5_tts""": 1024, """microsoft/speecht5_vc""": 1024, } class a__ ( snake_case__ ): _a : int = VOCAB_FILES_NAMES _a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _a : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : str = ["""input_ids""", """attention_mask"""] def __init__( self , _A , _A="<s>" , _A="</s>" , _A="<unk>" , _A="<pad>" , _A = None , **_A , ): """simple docstring""" __lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_A , eos_token=_A , unk_token=_A , pad_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) __lowerCAmelCase = vocab_file __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_A ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.sp_model.get_piece_size() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __lowerCAmelCase = self.__dict__.copy() __lowerCAmelCase = None return state def __setstate__( self , _A ): """simple docstring""" __lowerCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __lowerCAmelCase = {} __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.sp_model.encode(_A , out_type=_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.sp_model.piece_to_id(_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = self.sp_model.IdToPiece(_A ) return token def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = [] __lowerCAmelCase = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_A ) + token __lowerCAmelCase = [] else: current_sub_tokens.append(_A ) out_string += self.sp_model.decode(_A ) return out_string.strip() def __SCREAMING_SNAKE_CASE( self , _A , _A=None ): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) __lowerCAmelCase = [1] if token_ids_a is None: return ([0] * len(_A )) + suffix_ones return ([0] * len(_A )) + ([0] * len(_A 
)) + suffix_ones def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , "wb" ) as fi: __lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
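# Minimal sanity check for the in-place quick sort above (the sample list is
# illustrative; the comparison count is O(n log n) on average with a random pivot):
sample = [3, 1, 4, 1, 5, 9, 2, 6]
comparisons = _in_place_quick_sort(sample, 0, len(sample) - 1)
assert sample == [1, 1, 2, 3, 4, 5, 6, 9]
print(f"sorted in {comparisons} comparisons")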
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = torch.device("""cpu""") def _a ( ): __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im def _a ( SCREAMING_SNAKE_CASE_ : Any ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] ) def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int ): __lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = val def _a ( SCREAMING_SNAKE_CASE_ : str ): __lowerCAmelCase = [] for k in state_dict.keys(): __lowerCAmelCase = k if ".pwconv" in k: __lowerCAmelCase = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: __lowerCAmelCase = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: __lowerCAmelCase = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: __lowerCAmelCase = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: __lowerCAmelCase = k_new.split("." ) if ls[2].isdigit(): __lowerCAmelCase = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: __lowerCAmelCase = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ): __lowerCAmelCase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __lowerCAmelCase = 10_00 __lowerCAmelCase = "huggingface/label-files" __lowerCAmelCase = "imagenet-1k-id2label.json" __lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) __lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __lowerCAmelCase = [3, 3, 6, 4] __lowerCAmelCase = [48, 56, 1_12, 2_20] elif swiftformer_name == "swiftformer_s": __lowerCAmelCase = [3, 3, 9, 6] __lowerCAmelCase = [48, 64, 1_68, 2_24] elif swiftformer_name == "swiftformer_l1": __lowerCAmelCase = [4, 3, 10, 5] __lowerCAmelCase = [48, 96, 1_92, 3_84] elif swiftformer_name == "swiftformer_l3": __lowerCAmelCase = [4, 4, 12, 6] __lowerCAmelCase = [64, 1_28, 3_20, 5_12] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): __lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" , check_hash=SCREAMING_SNAKE_CASE_ ) else: __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" ) __lowerCAmelCase = checkpoint __lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load HuggingFace model __lowerCAmelCase = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE_ ).eval() hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # prepare test inputs __lowerCAmelCase = prepare_img() __lowerCAmelCase = ViTImageProcessor.from_pretrained("preprocessor_config" ) __lowerCAmelCase = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) # compare outputs from both models __lowerCAmelCase = get_expected_output(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 10_00] ) assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") UpperCamelCase__ = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
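# For context, a sketch of how this lazy init is consumed (assuming the
# standard transformers package layout; torch- and speech-dependent symbols
# are only imported on first attribute access, keeping `import transformers`
# cheap when those extras are missing):
#
#     from transformers import ASTConfig  # resolved lazily via _LazyModule
#     config = ASTConfig()
#     print(config.model_type)  # "audio-spectrogram-transformer"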
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""", datefmt="""%Y-%m-%d %H:%M:%S""", level=os.environ.get("""LOGLEVEL""", """INFO""").upper(), stream=sys.stdout, ) UpperCamelCase__ = logging.getLogger(__name__) UpperCamelCase__ = {"""facebook/bart-base""": BartForConditionalGeneration} UpperCamelCase__ = {"""facebook/bart-base""": BartTokenizer} def _a ( ): __lowerCAmelCase = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." ) parser.add_argument( "--validation_file" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length" , type=SCREAMING_SNAKE_CASE_ , default=5 , help="The maximum total input sequence length after tokenization." , ) parser.add_argument( "--num_beams" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ) , ) parser.add_argument( "--model_name_or_path" , type=SCREAMING_SNAKE_CASE_ , help="Path to pretrained model or model identifier from huggingface.co/models." , required=SCREAMING_SNAKE_CASE_ , ) parser.add_argument( "--config_name" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="Pretrained config name or path if not the same as model_name" , ) parser.add_argument( "--device" , type=SCREAMING_SNAKE_CASE_ , default="cpu" , help="Device where the model will be run" , ) parser.add_argument("--output_file_path" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="Where to store the final ONNX file." ) __lowerCAmelCase = parser.parse_args() return args def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int]="cpu" ): __lowerCAmelCase = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE_ ) if model_name in ["facebook/bart-base"]: __lowerCAmelCase = 0 __lowerCAmelCase = None __lowerCAmelCase = 0 return huggingface_model, tokenizer def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ): model.eval() __lowerCAmelCase = None __lowerCAmelCase = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE_ ) ) with torch.no_grad(): __lowerCAmelCase = "My friends are cool but they eat too many carbs." 
__lowerCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors="pt" ).to(model.device ) __lowerCAmelCase = model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , early_stopping=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( SCREAMING_SNAKE_CASE_ , ( inputs["input_ids"], inputs["attention_mask"], num_beams, max_length, model.config.decoder_start_token_id, ) , SCREAMING_SNAKE_CASE_ , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={ "input_ids": {0: "batch", 1: "seq"}, "output_ids": {0: "batch", 1: "seq_out"}, } , example_outputs=SCREAMING_SNAKE_CASE_ , ) logger.info("Model exported to {}".format(SCREAMING_SNAKE_CASE_ ) ) __lowerCAmelCase = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE_ ) ) logger.info("Deduplicated and optimized model written to {}".format(SCREAMING_SNAKE_CASE_ ) ) __lowerCAmelCase = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = ort_sess.run( SCREAMING_SNAKE_CASE_ , { "input_ids": inputs["input_ids"].cpu().numpy(), "attention_mask": inputs["attention_mask"].cpu().numpy(), "num_beams": np.array(SCREAMING_SNAKE_CASE_ ), "max_length": np.array(SCREAMING_SNAKE_CASE_ ), "decoder_start_token_id": np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info("Model outputs from torch and ONNX Runtime are similar." ) logger.info("Success." ) def _a ( ): __lowerCAmelCase = parse_args() __lowerCAmelCase = 5 __lowerCAmelCase = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() __lowerCAmelCase = torch.device(args.device ) __lowerCAmelCase , __lowerCAmelCase = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE_ ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" ) model.to(SCREAMING_SNAKE_CASE_ ) if args.max_length: __lowerCAmelCase = args.max_length if args.num_beams: __lowerCAmelCase = args.num_beams if args.output_file_path: __lowerCAmelCase = args.output_file_path else: __lowerCAmelCase = "BART.onnx" logger.info("Exporting model to ONNX" ) export_and_validate_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
import argparse import os import re import packaging.version UpperCamelCase__ = """examples/""" UpperCamelCase__ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCamelCase__ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCamelCase__ = """README.md""" def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern] __lowerCAmelCase = replace.replace("VERSION" , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = re_pattern.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , pattern="examples" ) def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not patch: update_version_in_examples(SCREAMING_SNAKE_CASE_ ) def _a ( ): __lowerCAmelCase = "🤗 Transformers currently provides the following architectures" __lowerCAmelCase = "1. Want to contribute a new model?" with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.readlines() # Find the start of the list. __lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): __lowerCAmelCase = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , ) index += 1 with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(SCREAMING_SNAKE_CASE_ ) def _a ( ): with open(REPLACE_FILES["init"] , "r" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE_ ).groups()[0] return packaging.version.parse(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any]=False ): __lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" 
) if default_version.is_devrelease: __lowerCAmelCase = default_version.base_version elif patch: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __lowerCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __lowerCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = default_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ , patch=SCREAMING_SNAKE_CASE_ ) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() def _a ( ): __lowerCAmelCase = get_version() __lowerCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __lowerCAmelCase = current_version.base_version # Check with the user we got that right. __lowerCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: __lowerCAmelCase = dev_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE_ ) print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCamelCase__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow UpperCamelCase__ = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ """text-classification""", """language-modeling""", """summarization""", """token-classification""", """question-answering""", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) UpperCamelCase__ = logging.getLogger() def _a ( ): __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("-f" ) __lowerCAmelCase = parser.parse_args() return args.f def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="eval" ): __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{split}_results.json""" ) if os.path.exists(SCREAMING_SNAKE_CASE_ ): with open(SCREAMING_SNAKE_CASE_ , "r" ) as f: return json.load(SCREAMING_SNAKE_CASE_ ) raise ValueError(F"""can't find {path}""" ) UpperCamelCase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class a__ ( snake_case__ ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(_A , "argv" , _A ): run_flax_glue.main() __lowerCAmelCase = get_results(_A ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(_A , "argv" , _A ): run_clm_flax.main() __lowerCAmelCase = get_results(_A ) self.assertLess(result["eval_perplexity"] , 1_0_0 ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(_A , "argv" , _A ): run_summarization_flax.main() __lowerCAmelCase = get_results(_A , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 1_0 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) self.assertGreaterEqual(result["test_rougeLsum"] , 7 ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" 
__lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(_A , "argv" , _A ): run_mlm_flax.main() __lowerCAmelCase = get_results(_A ) self.assertLess(result["eval_perplexity"] , 4_2 ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(_A , "argv" , _A ): run_ta_mlm_flax.main() __lowerCAmelCase = get_results(_A ) self.assertGreaterEqual(result["eval_accuracy"] , 0.42 ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = 7 if get_gpu_count() > 1 else 2 __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(_A , "argv" , _A ): run_flax_ner.main() __lowerCAmelCase = get_results(_A ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(_A , "argv" , _A ): run_qa.main() __lowerCAmelCase = get_results(_A ) self.assertGreaterEqual(result["eval_f1"] , 3_0 ) self.assertGreaterEqual(result["eval_exact"] , 3_0 )
92
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a__ ( snake_case__ , unittest.TestCase ): _a : Dict = KandinskyImgaImgPipeline _a : List[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] _a : str = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] _a : List[Any] = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _a : int = False @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 3_2 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.time_input_dim * 4 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return 1_0_0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , ) __lowerCAmelCase = MultilingualCLIP(_A ) __lowerCAmelCase = text_encoder.eval() return text_encoder @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase = UNetaDConditionModel(**_A ) return model @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } 
@property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" torch.manual_seed(0 ) __lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = { "num_train_timesteps": 1_0_0_0, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowerCAmelCase = DDIMScheduler(**_A ) __lowerCAmelCase = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ): """simple docstring""" __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A ) __lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A ) __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) if str(_A ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(_A ) else: __lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __lowerCAmelCase = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 1_0, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "cpu" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**_A ) __lowerCAmelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowerCAmelCase = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowerCAmelCase = "A red cartoon frog, 4k" __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(_A ) __lowerCAmelCase = 
KandinskyImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) __lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase = pipeline( _A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_A , _A )
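# Sketch of the image-slice assertion pattern used in the fast test above: a
# 3x3 corner of the output is compared element-wise against pinned reference
# values. The array here is random stand-in data, and `expected_slice` is
# copied from it for illustration; a real test hard-codes those numbers.
import numpy as np

image = np.random.RandomState(0).rand(1, 64, 64, 3)  # stand-in for pipe(...).images
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
expected_slice = image_slice.flatten().copy()
assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2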
92
1
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCamelCase__ = logging.get_logger(__name__) @dataclass class a__ ( snake_case__ ): _a : List[str] = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **_A ): """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __lowerCAmelCase = deprecated_arg[3:] __lowerCAmelCase = not kwargs.pop(_A ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) __lowerCAmelCase = kwargs.pop("tpu_name" , self.tpu_name ) __lowerCAmelCase = kwargs.pop("device_idx" , self.device_idx ) __lowerCAmelCase = kwargs.pop("eager_mode" , self.eager_mode ) __lowerCAmelCase = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**_A ) _a : str = field( default=snake_case__ , metadata={"""help""": """Name of TPU"""} , ) _a : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) _a : bool = field(default=snake_case__ , metadata={"""help""": """Benchmark models in eager model."""} ) _a : bool = field( default=snake_case__ , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) __lowerCAmelCase = None if self.tpu: try: if self.tpu_name: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __lowerCAmelCase = None return tpu @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return self._setup_strategy @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.n_gpu > 0
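# Standalone restatement of the deprecated-argument handling in __init__ above:
# a legacy `no_<flag>=True` kwarg is translated into `<flag>=False` by stripping
# the "no_" prefix and negating the value. Illustrative helper, not the class:
def translate_deprecated(kwargs: dict, deprecated_args: list) -> dict:
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # drop the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs


assert translate_deprecated({"no_cuda": True}, ["no_cuda"]) == {"cuda": False}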
92
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 = highest); each level holds at most 100 items."""

    def __init__(self):
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Return the oldest item from the highest-priority non-empty queue."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The element's value is its priority: smaller values are dequeued first."""

    def __init__(self):
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
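# For comparison, the standard library already ships a binary-heap priority
# queue; the pure-list classes above are didactic. Equivalent smallest-first
# behaviour with heapq costs O(log n) per operation instead of O(n):
import heapq

heap = []
for value in (10, 70, 100, 1, 5, 7, 4, 64, 128):
    heapq.heappush(heap, value)
assert heapq.heappop(heap) == 1  # smallest element comes out first
assert heapq.heappop(heap) == 4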
92
1
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} UpperCamelCase__ = { """vocab_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), }, """merges_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), }, } UpperCamelCase__ = { """allenai/longformer-base-4096""": 4096, """allenai/longformer-large-4096""": 4096, """allenai/longformer-large-4096-finetuned-triviaqa""": 4096, """allenai/longformer-base-4096-extra.pos.embd.only""": 4096, """allenai/longformer-large-4096-extra.pos.embd.only""": 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _a ( ): __lowerCAmelCase = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __lowerCAmelCase = bs[:] __lowerCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(SCREAMING_SNAKE_CASE_ ) cs.append(2**8 + n ) n += 1 __lowerCAmelCase = [chr(SCREAMING_SNAKE_CASE_ ) for n in cs] return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ): __lowerCAmelCase = set() __lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCAmelCase = char return pairs class a__ ( snake_case__ ): _a : str = VOCAB_FILES_NAMES _a : List[str] = PRETRAINED_VOCAB_FILES_MAP _a : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): """simple docstring""" __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding="utf-8" ) as vocab_handle: __lowerCAmelCase = json.load(_A ) __lowerCAmelCase = {v: k for k, v in self.encoder.items()} __lowerCAmelCase = errors # how to handle errors in decoding __lowerCAmelCase = bytes_to_unicode() __lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding="utf-8" ) as merges_handle: __lowerCAmelCase = merges_handle.read().split("\n" )[1:-1] __lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __lowerCAmelCase = dict(zip(_A , range(len(_A ) ) ) ) __lowerCAmelCase = {} __lowerCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowerCAmelCase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return len(self.encoder ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" if token in self.cache: return self.cache[token] __lowerCAmelCase = tuple(_A ) __lowerCAmelCase = get_pairs(_A ) if not pairs: return token while True: __lowerCAmelCase = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowerCAmelCase , __lowerCAmelCase = bigram __lowerCAmelCase = [] __lowerCAmelCase = 0 while i < len(_A ): try: __lowerCAmelCase = word.index(_A , _A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCAmelCase = j if word[i] == first and i < len(_A ) - 1 
and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCAmelCase = tuple(_A ) __lowerCAmelCase = new_word if len(_A ) == 1: break else: __lowerCAmelCase = get_pairs(_A ) __lowerCAmelCase = " ".join(_A ) __lowerCAmelCase = word return word def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = [] for token in re.findall(self.pat , _A ): __lowerCAmelCase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) ) return bpe_tokens def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.decoder.get(_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "".join(_A ) __lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowerCAmelCase = os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + "\n" ) __lowerCAmelCase = 0 with open(_A , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowerCAmelCase = token_index writer.write(" ".join(_A ) + "\n" ) index += 1 return vocab_file, merge_file def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] __lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return [1] + ([0] * len(_A )) + [1] return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1] def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __SCREAMING_SNAKE_CASE( self , _A , _A=False , **_A ): """simple docstring""" __lowerCAmelCase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()): __lowerCAmelCase = " " + text return (text, kwargs)
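# The byte-to-unicode helper above builds a bijection from all 256 byte values
# to printable unicode characters, so arbitrary UTF-8 bytes can be treated as
# BPE symbols. A self-contained restatement of that mapping with spot checks
# (the real tokenizer uses the lru_cache'd function defined earlier):
bs = (
    list(range(ord("!"), ord("~") + 1))
    + list(range(ord("¡"), ord("¬") + 1))
    + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
    if b not in bs:
        bs.append(b)
        cs.append(2**8 + n)
        n += 1
byte_encoder = dict(zip(bs, [chr(c) for c in cs]))
assert len(byte_encoder) == 256  # every byte value is covered exactly once
assert byte_encoder[ord("A")] == "A"  # printable ASCII maps to itself
assert byte_encoder[ord(" ")] == "Ġ"  # space becomes the visible 'Ġ' marker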
92
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a__ : def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = scope __lowerCAmelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = num_patches + 2 def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTModel(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = DeiTForMaskedImageModeling(config=_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, 
self.image_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForMaskedImageModeling(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ): """simple docstring""" __lowerCAmelCase = self.type_sequence_label_size __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a__ ( snake_case__ , snake_case__ , unittest.TestCase ): _a : Optional[Any] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _a : int = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _a : Optional[Any] = False _a : Tuple = False _a : Tuple = False def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(_A ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def __SCREAMING_SNAKE_CASE( 
self ): """simple docstring""" __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ): """simple docstring""" __lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" if not self.model_tester.is_training: return __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_A ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __lowerCAmelCase = False __lowerCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue __lowerCAmelCase = model_class(_A ) model.gradient_checkpointing_enable() model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) __lowerCAmelCase = model(**_A ).loss loss.backward() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_A ), *get_values(_A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): __lowerCAmelCase = problem_type["title"] __lowerCAmelCase = problem_type["num_labels"] __lowerCAmelCase = model_class(_A ) model.to(_A ) model.train() __lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A ) if problem_type["num_labels"] > 1: __lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) __lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_A ) as warning_list: __lowerCAmelCase = model(**_A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = DeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _a ( ): __lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( _A ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ).to(_A ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**_A ) # verify the logits __lowerCAmelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) __lowerCAmelCase = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ) __lowerCAmelCase = inputs.pixel_values.to(_A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __lowerCAmelCase = model(_A )
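# Standalone sketch of the label reshaping in the problem-type loop above:
# multi-label targets are built by repeating a single label column
# `num_labels` times, then casting to the problem's dtype.
import torch

labels = torch.tensor([1, 0])  # shape (batch,)
num_labels = 2
multi_labels = labels.unsqueeze(1).repeat(1, num_labels)  # shape (batch, num_labels)
assert multi_labels.shape == (2, 2)
assert multi_labels.to(torch.float).dtype == torch.float32  # dtype cast for multi_label_classification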
92
1
def heaps(arr: list) -> list:
    """Return all permutations of arr using Heap's iterative-swap algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
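# Sanity check for heaps() above: Heap's algorithm must produce exactly the
# n! permutations itertools generates, each exactly once, just in a
# different order.
from itertools import permutations

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))
assert len(heaps([1, 2, 3, 4])) == 24  # 4! permutations, no duplicates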
92
def solution(limit: int = 1_000_000) -> int:
    """Return the sum of Euler's totient phi(n) for 2 <= n <= limit, using a
    sieve: phi[i] stays at i - 1 exactly when i is prime, and each prime then
    discounts its multiples."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
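# Spot check for the sieve above: phi(n) from the sieve should match a
# brute-force gcd count for small n; both give sum(phi(n), 2 <= n <= 10) == 31.
from math import gcd


def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


assert sum(phi_naive(n) for n in range(2, 11)) == 31
assert solution(10) == 31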
92
1
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
92
import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( """The `image_to_image.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionImg2ImgPipeline` instead.""" )
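# Sketch of how a caller observes the shim above: importing the module emits a
# warning that can be captured with the standard warnings machinery (the
# warn() call here stands in for the actual import).
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.warn("The `image_to_image.py` script is outdated.")  # stand-in for the import
assert "outdated" in str(caught[0].message)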
92
1
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class a__ ( yaml.SafeLoader ): def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value] __lowerCAmelCase = [tuple(_A ) if isinstance(_A , _A ) else key for key in keys] __lowerCAmelCase = Counter(_A ) __lowerCAmelCase = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" ) def __SCREAMING_SNAKE_CASE( self , _A , _A=False ): """simple docstring""" __lowerCAmelCase = super().construct_mapping(_A , deep=_A ) self._check_no_duplicates_on_constructed_node(_A ) return mapping def _a ( SCREAMING_SNAKE_CASE_ : str ): __lowerCAmelCase = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: __lowerCAmelCase = full_content[1:].index("---" ) + 1 __lowerCAmelCase = "\n".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(SCREAMING_SNAKE_CASE_ ) class a__ ( snake_case__ ): # class attributes _a : List[Any] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def __SCREAMING_SNAKE_CASE( cls , _A ): """simple docstring""" with open(_A , encoding="utf-8" ) as readme_file: __lowerCAmelCase , __lowerCAmelCase = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(_A ) else: return cls() def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" if path.exists(): with open(_A , encoding="utf-8" ) as readme_file: __lowerCAmelCase = readme_file.read() else: __lowerCAmelCase = None __lowerCAmelCase = self._to_readme(_A ) with open(_A , "w" , encoding="utf-8" ) as readme_file: readme_file.write(_A ) def __SCREAMING_SNAKE_CASE( self , _A = None ): """simple docstring""" if readme_content is not None: __lowerCAmelCase , __lowerCAmelCase = _split_yaml_from_readme(_A ) __lowerCAmelCase = "---\n" + self.to_yaml_string() + "---\n" + content else: __lowerCAmelCase = "---\n" + self.to_yaml_string() + "---\n" return full_content @classmethod def __SCREAMING_SNAKE_CASE( cls , _A ): """simple docstring""" __lowerCAmelCase = yaml.load(_A , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields __lowerCAmelCase = { (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return yaml.safe_dump( { (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=_A , allow_unicode=_A , encoding="utf-8" , ).decode("utf-8" ) UpperCamelCase__ = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], 
"""unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser UpperCamelCase__ = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") UpperCamelCase__ = ap.parse_args() UpperCamelCase__ = Path(args.readme_filepath) UpperCamelCase__ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
92
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType UpperCamelCase__ = get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if accelerator.process_index == 0: logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {output_model_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving model to {ckpt_dir}""" ) __lowerCAmelCase = {"model": state_dict} dist_cp.save_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Model saved to {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return __lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin""" __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = 
torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCAmelCase = ( F"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading model from {input_model_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" ) if F"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading model from {ckpt_dir}""" ) __lowerCAmelCase = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , ) __lowerCAmelCase = state_dict["model"] logger.info(F"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str=0 ): os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCAmelCase = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state saved in {output_optimizer_file}""" ) else: __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) logger.info(F"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , ) logger.info(F"""Optimizer state saved in {ckpt_dir}""" ) def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCAmelCase = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __lowerCAmelCase = ( F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) __lowerCAmelCase = 
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" ) __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" ) else: __lowerCAmelCase = ( os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if F"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(F"""Loading Optimizer from {ckpt_dir}""" ) __lowerCAmelCase = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , ) __lowerCAmelCase = optim_state["optimizer"] logger.info(F"""Optimizer loaded from {ckpt_dir}""" ) __lowerCAmelCase = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
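# Standalone restatement of the checkpoint-naming convention used above: the
# first model gets "{MODEL_NAME}.bin", later models get an index suffix, and
# LOCAL_STATE_DICT checkpoints additionally embed the process rank. The
# MODEL_NAME value is assumed to be "pytorch_model", matching accelerate's
# constants module.
MODEL_NAME = "pytorch_model"  # assumed value of the imported constant


def full_ckpt_name(model_index: int) -> str:
    return f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"


def local_ckpt_name(model_index: int, rank: int) -> str:
    if model_index == 0:
        return f"{MODEL_NAME}_rank{rank}.bin"
    return f"{MODEL_NAME}_{model_index}_rank{rank}.bin"


assert full_ckpt_name(0) == "pytorch_model.bin"
assert local_ckpt_name(1, 3) == "pytorch_model_1_rank3.bin"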
92
1