code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( UpperCAmelCase__ ,unittest.TestCase ): _UpperCamelCase : List[str] = DanceDiffusionPipeline _UpperCamelCase : Union[str, Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS _UpperCamelCase : Optional[Any] = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } _UpperCamelCase : Dict = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS _UpperCamelCase : Optional[Any] = False _UpperCamelCase : Tuple = False def _snake_case ( self ) -> int: """simple docstring""" torch.manual_seed(0 ) a__ : Tuple = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowerCAmelCase , use_timestep_embedding=__lowerCAmelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , ) a__ : Tuple = IPNDMScheduler() a__ : List[Any] = { "unet": unet, "scheduler": scheduler, } return components def _snake_case ( self , snake_case , snake_case=0 ) -> Tuple: """simple docstring""" if str(__lowerCAmelCase ).startswith("mps" ): a__ : List[Any] = torch.manual_seed(__lowerCAmelCase ) else: a__ : str = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) a__ : int = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def 
_snake_case ( self ) -> List[Any]: """simple docstring""" a__ : int = "cpu" # ensure determinism for the device-dependent torch.Generator a__ : str = self.get_dummy_components() a__ : List[Any] = DanceDiffusionPipeline(**__lowerCAmelCase ) a__ : Optional[int] = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a__ : Dict = self.get_dummy_inputs(__lowerCAmelCase ) a__ : str = pipe(**__lowerCAmelCase ) a__ : Dict = output.audios a__ : List[Any] = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) a__ : Tuple = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _snake_case ( self ) -> List[str]: """simple docstring""" return super().test_save_load_local() @skip_mps def _snake_case ( self ) -> List[str]: """simple docstring""" return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" return super().test_save_load_optional_components() @skip_mps def _snake_case ( self ) -> List[Any]: """simple docstring""" return super().test_attention_slicing_forward_pass() def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def _snake_case ( self ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Tuple = torch_device a__ : Union[str, Any] = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" ) a__ : Dict = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a__ : Union[str, Any] = torch.manual_seed(0 ) a__ : Any = pipe(generator=__lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) a__ : Union[str, Any] = 
output.audios a__ : str = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a__ : Optional[int] = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self ) -> Any: """simple docstring""" a__ : Dict = torch_device a__ : Union[str, Any] = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa ) a__ : int = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) a__ : int = torch.manual_seed(0 ) a__ : Optional[int] = pipe(generator=__lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) a__ : List[str] = output.audios a__ : Optional[int] = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a__ : Union[str, Any] = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
710
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random""" SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random""" @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> List[Any]: """simple docstring""" return AutoConfig.from_pretrained(snake_case ) def _snake_case ( self ) -> Any: """simple docstring""" a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _snake_case ( self ) -> str: """simple docstring""" a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case ) def _snake_case ( self ) -> List[str]: """simple docstring""" a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _snake_case ( self ) -> int: """simple docstring""" with self.assertRaises(snake_case ): create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
629
0
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __lowerCAmelCase ( __UpperCAmelCase ): def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Optional[int] = tempfile.mkdtemp() a__ : Tuple = 8 # DPR tok a__ : int = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a__ : Tuple = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) a__ : Dict = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok a__ : List[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a__ : Union[str, Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) a__ : List[str] = 
["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a__ : int = {"unk_token": "<unk>"} a__ : str = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) a__ : Any = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase_ ) ) def _snake_case ( self ) -> int: """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def _snake_case ( self ) -> List[str]: """simple docstring""" return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def _snake_case ( self ) -> Any: """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def _snake_case ( self ) -> Any: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _snake_case ( self ) -> int: """simple docstring""" a__ : Dict = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : str = self.get_dummy_dataset() a__ : str = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: a__ : Optional[int] = dataset a__ : Dict = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , 
generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _snake_case ( self , snake_case ) -> Optional[int]: """simple docstring""" a__ : List[str] = self.get_dummy_dataset() a__ : str = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: a__ : Tuple = os.path.join(self.tmpdirname , "dataset" ) a__ : Dict = os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset a__ : str = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: a__ : List[Any] = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def _snake_case ( self ) -> Dict: """simple docstring""" a__ : List[Any] = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) a__ : Tuple = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) a__ : int = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) a__ : Dict = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , "wb" ) ) a__ : List[str] = RagConfig( 
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) a__ : Optional[int] = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _snake_case ( self ) -> Any: """simple docstring""" a__ : str = 1 a__ : List[Any] = self.get_dummy_canonical_hf_index_retriever() a__ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ , a__ , a__ : Dict = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _snake_case ( self ) -> str: """simple docstring""" a__ : Any = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: a__ : Optional[Any] = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_ ) a__ : str = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) a__ : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Optional[Any] = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : List[Any] = 1 a__ : 
Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) a__ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ , a__ , a__ : Optional[Any] = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) a__ : List[str] = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) a__ : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Any = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def _snake_case ( self ) -> str: """simple docstring""" a__ : List[str] = 1 a__ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) a__ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ , a__ , a__ : Union[str, Any] = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , 
["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) a__ : Optional[int] = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) a__ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Tuple = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Any = 1 a__ : Optional[int] = self.get_dummy_legacy_index_retriever() a__ : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ , a__ , a__ : Tuple = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : Tuple = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: 
retriever.save_pretrained(UpperCAmelCase_ ) a__ : Union[str, Any] = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) a__ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : List[str] = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _snake_case ( self ) -> int: """simple docstring""" import torch a__ : Any = 1 a__ : Dict = self.get_dummy_canonical_hf_index_retriever() a__ : Optional[int] = [[5, 7], [10, 11]] a__ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : str = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) a__ , a__ , a__ : List[str] = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) a__ : str = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors="pt" , ) a__ , a__ , a__ , a__ : Union[str, Any] = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Optional[Any] = 
self.get_dpr_ctx_encoder_tokenizer() a__ : List[str] = 1 a__ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ ) a__ : Optional[Any] = [[5, 7], [10, 11]] a__ : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Tuple = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) self.assertEqual( len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
711
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1""" SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2""" SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3""" SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4""" class __lowerCAmelCase ( _UpperCamelCase ): def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any: """simple docstring""" super()._init_() a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : int = StableDiffusionPipeline( vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _snake_case ( self ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )} def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a__ : List[Any] = self.unet.config.attention_head_dim // 2 
self.unet.set_attention_slice(snake_case ) def _snake_case ( self ) -> Tuple: """simple docstring""" self.enable_attention_slicing(snake_case ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]: """simple docstring""" return 
self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" a__ : Any = "cuda" if torch.cuda.is_available() else "cpu" self.to(snake_case ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 a__ : Any = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , 
num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.2 a__ : List[Any] = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.3 a__ : Optional[Any] = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.4 a__ : Dict = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
629
0
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=UpperCamelCase_ ) class __lowerCAmelCase ( UpperCamelCase_ ): _UpperCamelCase : Any = field(default="""image-classification""" ,metadata={"""include_in_asdict_even_if_is_default""": True} ) _UpperCamelCase : Union[str, Any] = Features({"""image""": Image()} ) _UpperCamelCase : Optional[Any] = Features({"""labels""": ClassLabel} ) _UpperCamelCase : List[str] = """image""" _UpperCamelCase : Any = """labels""" def _snake_case ( self , snake_case ) -> str: """simple docstring""" if self.label_column not in features: raise ValueError(F"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , UpperCamelCase__ ): raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" ) a__ : Optional[int] = copy.deepcopy(self ) a__ : Any = self.label_schema.copy() a__ : Optional[Any] = features[self.label_column] a__ : Union[str, Any] = label_schema return task_template @property def _snake_case ( self ) -> str: """simple docstring""" return { self.image_column: "image", self.label_column: "labels", }
712
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665 def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ): if fluid_density <= 0: raise ValueError("Impossible fluid density" ) if volume < 0: raise ValueError("Impossible Object volume" ) if gravity <= 0: raise ValueError("Impossible Gravity" ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
629
0
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD


########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of dataloaders (train, eval) for the GLUE MRPC dataset.

    Args:
        accelerator: an `Accelerator` object — used for `main_process_first`
            and to decide padding strategy (TPU vs. others, mixed precision).
        batch_size: the batch size for the train dataloader.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels
    # by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Trains BERT-base on MRPC with gradient accumulation + LocalSGD synchronization."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
713
from __future__ import annotations

from random import random


class Node:
    """Treap node: a search-tree value paired with a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        # Random priority enforces the heap property, which keeps the
        # expected tree height logarithmic.
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two treaps: (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # The whole right subtree (and root) are > value; recurse left.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # The whole left subtree (and root) are <= value; recurse right.
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` is <= every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` into the treap and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase every node equal to `value` and return the new root."""
    # Carve out the slice of nodes equal to `value` and drop it.
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order, comma separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply whitespace-separated commands: '+v' inserts v, '-v' erases all v."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive loop over the treap until the user enters 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
629
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer backed by a character-level SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """
        Args:
            vocab_file: path to the SentencePiece model file.
            sp_model_kwargs: extra keyword arguments passed to
                `spm.SentencePieceProcessor` (e.g. `enable_sampling`).
        """
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by appending eos to a sequence (or pair)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens (the trailing eos) and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. restored from pickle): write the serialized proto.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
714
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (CPU-sized) tests for the StableUnCLIP text-to-image pipeline."""

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build tiny, seeded versions of every pipeline component."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # Class embeddings are the concatenation of image embedding and
            # noise level, hence twice the embedder projection dim.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
629
0
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint into a Hugging Face `LukeForMaskedLM` + `MLukeTokenizer`.

    Args:
        checkpoint_path: path to the original `pytorch_model.bin`.
        metadata_path: path to the `metadata.json` defining the model configuration.
        entity_vocab_path: path to the `entity_vocab.tsv`/jsonl entity vocabulary.
        pytorch_dump_folder_path: output directory for model + tokenizer.
        model_size: "base" or "large" (only "base" has verification tensors here).
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Rewrite the saved tokenizer config so it loads as an MLukeTokenizer.
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from existing pieces.
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (word-to-entity, entity-to-word, entity-to-entity all start from the
    # original query projection).
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    # Decoder weights are tied to the embeddings, so drop them before loading.
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    """Load the original jsonl entity vocab, mapping 'language:entity' (or special tokens) to ids."""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
715
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> list of public symbols.
# Entries for optional backends (tokenizers / torch / tf / flax) are appended
# below only when the corresponding dependency is installed.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: the original bound this list to a throwaway variable instead of
    # registering it in `_import_structure`, leaving the structure incomplete
    # and `_import_structure` itself undefined at the _LazyModule call below.
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors `_import_structure`.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # BUG FIX: the lazy module must be installed into sys.modules to take
    # effect; the original assigned it to a throwaway module-level variable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
629
0
import math def _A ( lowerCamelCase ): if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): a__ : List[Any] = F"""Input value of [number={number}] must be an integer""" raise TypeError(_lowerCAmelCase ) if number < 1: a__ : Union[str, Any] = F"""Input value of [number={number}] must be > 0""" raise ValueError(_lowerCAmelCase ) elif number == 1: return 3 elif number == 2: return 5 else: a__ : List[Any] = int(math.log(number // 3 , 2 ) ) + 2 a__ : Optional[Any] = [3, 5] a__ : Optional[int] = 2 a__ : int = 3 for block in range(1 , _lowerCAmelCase ): for _ in range(_lowerCAmelCase ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(1_1): SCREAMING_SNAKE_CASE__ : List[str] = 0 try: SCREAMING_SNAKE_CASE__ : Optional[Any] = proth(number) except ValueError: print(f'ValueError: there is no {number}th Proth number') continue print(f'The {number}th Proth number: {value}')
716
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


# NOTE: all four functions below were defined under one colliding placeholder
# name while their call sites used the descriptive names; the descriptive
# names are restored here so the module is internally consistent.


def pi_estimator(iterations: int) -> None:
    """Estimate pi by Monte Carlo sampling of the unit square; prints results."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of ``function_to_integrate``
    over [min_value, max_value]: mean sampled value times interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    # Exact integral of x over [a, b] is (b^2 - a^2) / 2.
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
629
0
import cva
import numpy as np


class HarrisCorner:
    """Harris corner detector over a grayscale image.

    NOTE: class renamed from the placeholder `__lowerCAmelCase` — the
    __main__ driver below already instantiates it as `HarrisCorner`.
    """

    def __init__(self, k: float, window_size: int) -> None:
        """
        :param k: Harris sensitivity constant; only the two commonly
            published values 0.04 and 0.06 are accepted.
        :param window_size: side length of the square neighbourhood summed
            around each pixel.
        :raises ValueError: for any other k.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cva.Mat, list[list[int]]]:
        """Return (RGB image with detected corners painted red,
        list of [x, y, response] entries)."""
        img = cva.imread(img_path, 0)  # read as grayscale
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)

        # np.gradient on a 2-D array returns (d/d_rows, d/d_cols) == (dy, dx).
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy

        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                # Structure-tensor sums over the window centred at (x, y).
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # BUG FIX: the original shadowed the constructor's k with a
                # hard-coded local 0.04; use self.k as configured.
                r = det - self.k * (trace**2)

                # Can change the value: response threshold for a corner.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)

        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    # BUG FIX: detect() returns a (image, corner_list) tuple; unpack before
    # writing the annotated image.
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
717
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


# NOTE: the four functions below were all defined under one colliding
# placeholder name while their call sites used the descriptive names; the
# names used by the call sites are restored here.


def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    """Map one original EfficientFormer state-dict key to the HF naming scheme."""
    new_name = old_name

    if "patch_embed" in old_name:
        # Keys look like "patch_embed.<layer>.<param>".
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace(
                "network", match[0] + ".meta4D_layers.blocks." + match[2:-1]
            )
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of the state dict in place and return it."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    """Download the standard COCO sample image used for the sanity check."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: str, efficientformer_config_file: str, pytorch_dump_path: str, push_to_hub: bool
):
    """Convert an original EfficientFormer checkpoint to the HF format,
    verify its outputs on a sample image, save it, and optionally push it."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    # e.g. "efficientformer_l1_300d.pth" -> "efficientformer_l1"
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # NOTE(review): attribute name kept from the original source; confirm it
    # matches EfficientFormerConfig (upstream names it `num_meta3d_blocks`).
    num_meta4D_last_stage = config.depths[-1] - config.num_metaad_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline — must produce the same tensor as the
    # HF image processor above.
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(F"""Processor successfuly saved at {pytorch_dump_path}""")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
629
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Slow tokenizer unavailable without sentencepiece; the class attribute
    # below still needs a binding.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_0_9_6,
    "google/bigbird-roberta-large": 4_0_9_6,
    "google/bigbird-base-trivia-itc": 4_0_9_6,
}

SPIECE_UNDERLINE = "▁"


class __lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BigBird tokenizer.

    Class name kept as in the original file; the base class, the duplicated
    parameter names (previously a SyntaxError) and the colliding method names
    are restored so the overrides of PreTrainedTokenizerFast take effect.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Plain strings become non-stripping AddedTokens.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        # NOTE(review): lstrip=True follows the upstream BigBird fast
        # tokenizer; the original flags were obscured — confirm.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return 1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: zeros for the first sequence, ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into save_directory."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
718
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 5_1_2,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class __lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LXMERT tokenizer, BERT-style WordPiece.

    Class name kept as in the original file; base class, previously
    duplicated parameter names (a SyntaxError) and colliding method names
    restored so the PreTrainedTokenizerFast overrides take effect.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: zeros for the first sequence, ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Delegate vocabulary saving to the backend WordPiece model."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
629
0
import unittest

import numpy as np

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )


class FlaxBertModelTester(unittest.TestCase):
    """Builds small BertConfig/inputs fixtures for the Flax BERT tests.

    Renamed from the placeholder `__lowerCAmelCase` — the test class below
    already instantiates it as `FlaxBertModelTester`. The previously
    duplicated `__init__` parameter names (a SyntaxError) are restored to
    match the attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus a small BertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same fixtures, repackaged as (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: adds encoder hidden states and attention mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class __lowerCAmelCase(FlaxModelTesterMixin, unittest.TestCase):
    # NOTE(review): attribute names restored from the FlaxModelTesterMixin
    # contract (the original collided both under one placeholder); confirm
    # against the mixin.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,  # duplicated in the original tuple; kept as-is
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # BUG FIX: the tester was created but never stored on the instance.
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
719
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetVaConfig(PretrainedConfig):
    """Configuration for MobileNetV2 models.

    NOTE: both classes in this module were originally defined under the same
    placeholder name (the second silently clobbered the first); distinct
    names are restored. Defaults follow the google/mobilenet_v2_1.0_224
    architecture values carried in the original signature.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetVaOnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX output against PyTorch output.
        return 1E-4
629
0
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Test cases for knapsack/greedy_knapsack."""

    def test_sorted(self):
        """kp.calc_profit takes (profit, weight, max_weight) and returns
        the maximum achievable profit within the weight budget."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    # NOTE(review): the assertRaisesRegex calls below only construct the
    # assertion context — they do not invoke kp.calc_profit, so the error
    # paths are not actually exercised. Consider passing the callable and
    # its arguments, e.g. self.assertRaisesRegex(ValueError, msg, kp.calc_profit, ...).

    def test_negative_max_weight(self):
        """A negative max_weight should raise ValueError."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """A negative weight value should raise ValueError."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """A negative profit value should raise ValueError."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """A zero max_weight should raise ValueError."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Profit and weight lists of different lengths should raise ValueError."""
        self.assertRaisesRegex(ValueError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
720
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def _A ( lowerCamelCase ): a__ : List[str] = [] if isinstance(lowerCamelCase , lowerCamelCase ): for v in tree.values(): shapes.extend(_fetch_dims(lowerCamelCase ) ) elif isinstance(lowerCamelCase , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(lowerCamelCase ) ) elif isinstance(lowerCamelCase , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase ): a__ : List[str] = [] for d in reversed(lowerCamelCase ): idx.append(flat_idx % d ) a__ : Union[str, Any] = flat_idx // d return tuple(reversed(lowerCamelCase ) ) @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ): # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(lowerCamelCase ) -> None: a__ : int = True for i in range(len(lowerCamelCase ) ): a__ : Optional[Any] = -1 * (i + 1) l[reversed_idx] &= tally a__ : Tuple = l[reversed_idx] if start_edges is None: a__ : Optional[int] = [s == 0 for s in start] reduce_edge_list(lowerCamelCase ) if end_edges is None: a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )] reduce_edge_list(lowerCamelCase ) # Base cases. 
Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(lowerCamelCase ) == 0: return [()] elif len(lowerCamelCase ) == 1: return [(slice(start[0] , end[0] + 1 ),)] a__ : List[Tuple[slice, ...]] = [] a__ : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(lowerCamelCase , lowerCamelCase ): if s == e: path_list.append(slice(lowerCamelCase , s + 1 ) ) else: break a__ : Tuple[slice, ...] = tuple(lowerCamelCase ) a__ : Optional[Any] = len(lowerCamelCase ) # start == end, and we're done if divergence_idx == len(lowerCamelCase ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None a__ : Optional[Any] = start[divergence_idx] return tuple( path + (slice(lowerCamelCase , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None a__ : List[str] = end[divergence_idx] return tuple( path + (slice(lowerCamelCase , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: 
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) a__ : Optional[int] = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : Optional[int] = t.shape[:no_batch_dims] a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) ) # _get_minimal_slice_set is inclusive a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) ) # Get an ordered list of slices to perform a__ : str = _get_minimal_slice_set( lowerCamelCase , lowerCamelCase , lowerCamelCase , ) a__ : Any = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ): if not (len(lowerCamelCase ) > 0): raise ValueError("Must provide at least one input" ) a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )] a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] ) def _prep_inputs(lowerCamelCase ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: a__ : Dict = t.expand(orig_batch_dims + 
t.shape[no_batch_dims:] ) return t a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase ) a__ : str = None if _out is not None: a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) a__ : Optional[Any] = 1 for d in orig_batch_dims: flat_batch_dim *= d a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(lowerCamelCase ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t a__ : str = 0 a__ : Any = prepped_outputs for _ in range(lowerCamelCase ): # Chunk the input if not low_mem: a__ : str = _select_chunk else: a__ : Tuple = partial( _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , ) a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase ) # Run the layer on the chunk a__ : Any = layer(**lowerCamelCase ) # Allocate space for the output if out is None: a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase ) # Put the chunk in its pre-allocated space if isinstance(lowerCamelCase , lowerCamelCase ): def assign(lowerCamelCase , lowerCamelCase ) -> None: for k, v in da.items(): if isinstance(lowerCamelCase , lowerCamelCase ): assign(lowerCamelCase , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: a__ : Dict = da[k] assign(lowerCamelCase , lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): for xa, xa in zip(lowerCamelCase , lowerCamelCase ): if _add_into_out: xa[i : i + chunk_size] += xa else: a__ : Dict = xa elif isinstance(lowerCamelCase , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: a__ : Dict = output_chunk else: raise ValueError("Not supported" ) i += chunk_size a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase ) return out class 
__lowerCAmelCase : def __init__( self , snake_case = 512 , ) -> List[str]: """simple docstring""" a__ : int = max_chunk_size a__ : Optional[int] = None a__ : Optional[tuple] = None def _snake_case ( self , snake_case , snake_case , snake_case ) -> int: """simple docstring""" logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] a__ : List[str] = [c for c in candidates if c > min_chunk_size] a__ : Optional[int] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(snake_case ) -> bool: try: with torch.no_grad(): fn(*snake_case , chunk_size=snake_case ) return True except RuntimeError: return False a__ : Union[str, Any] = 0 a__ : Dict = len(snake_case ) - 1 while i > min_viable_chunk_size_index: a__ : Any = test_chunk_size(candidates[i] ) if not viable: a__ : List[Any] = (min_viable_chunk_size_index + i) // 2 else: a__ : Tuple = i a__ : Any = (i + len(snake_case ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _snake_case ( self , snake_case , snake_case ) -> bool: """simple docstring""" a__ : str = True for aa, aa in zip(snake_case , snake_case ): assert type(snake_case ) == type(snake_case ) if isinstance(snake_case , (list, tuple) ): consistent &= self._compare_arg_caches(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )] a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )] consistent &= self._compare_arg_caches(snake_case , snake_case ) else: consistent &= aa == aa return consistent def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int: """simple docstring""" a__ : List[Any] = True a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case ) if self.cached_arg_data is not 
None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(snake_case ) a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case ) else: # Otherwise, we can reuse the precomputed value a__ : Optional[int] = False if not consistent: a__ : List[str] = self._determine_favorable_chunk_size( snake_case , snake_case , snake_case , ) a__ : List[str] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
629
0
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig SCREAMING_SNAKE_CASE__ : List[str] = { "facebook/maskformer-swin-base-ade": ( "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : List[str] = """maskformer""" _UpperCamelCase : Tuple = {"""hidden_size""": """mask_feature_size"""} _UpperCamelCase : Optional[int] = ["""resnet""", """swin"""] _UpperCamelCase : Any = ["""detr"""] def __init__( self , snake_case = 256 , snake_case = 256 , snake_case = 0.1 , snake_case = False , snake_case = None , snake_case = None , snake_case = 0.02 , snake_case = 1.0 , snake_case = 1.0 , snake_case = 1.0 , snake_case = 20.0 , snake_case = None , **snake_case , ) -> str: """simple docstring""" if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k a__ : Optional[int] = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): a__ : Any = backbone_config.pop("model_type" ) a__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] a__ : int = config_class.from_dict(UpperCAmelCase__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
""" F"""Supported model types: {",".join(self.backbones_supported )}""" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 a__ : int = DetrConfig() else: # verify that the decoder is supported a__ : List[str] = ( decoder_config.pop("model_type" ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F"""Transformer Decoder {decoder_type} not supported, please use one of""" F""" {",".join(self.decoders_supported )}""" ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): a__ : Optional[Any] = CONFIG_MAPPING[decoder_type] a__ : Optional[Any] = config_class.from_dict(UpperCAmelCase__ ) a__ : Any = backbone_config a__ : Optional[int] = decoder_config # main feature dimension for the model a__ : List[str] = fpn_feature_size a__ : Union[str, Any] = mask_feature_size # initializer a__ : Optional[int] = init_std a__ : Any = init_xavier_std # Hungarian matcher && loss a__ : Optional[int] = cross_entropy_weight a__ : str = dice_weight a__ : str = mask_weight a__ : List[Any] = use_auxiliary_loss a__ : str = no_object_weight a__ : str = output_auxiliary_logits a__ : Dict = self.decoder_config.encoder_attention_heads a__ : Tuple = self.decoder_config.num_hidden_layers super().__init__(**UpperCAmelCase__ ) @classmethod def _snake_case ( cls , snake_case , snake_case , **snake_case ) -> Dict: """simple docstring""" return cls( backbone_config=UpperCAmelCase__ , decoder_config=UpperCAmelCase__ , **UpperCAmelCase__ , ) def _snake_case ( self ) -> Dict[str, any]: """simple docstring""" a__ : Optional[int] = copy.deepcopy(self.__dict__ ) a__ : Tuple = self.backbone_config.to_dict() a__ : str = self.decoder_config.to_dict() a__ : Optional[int] = self.__class__.model_type return output
721
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration for a UperNet semantic-segmentation model.

    Wraps a backbone configuration (ResNet by default) plus the decode-head
    and auxiliary-head hyper-parameters.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # upstream default; shared list is never mutated here
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            # Fall back to a default ResNet backbone exposing all four stages.
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a plain dict (e.g. loaded from JSON) into a config object.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
629
0
from queue import PriorityQueue from typing import Any import numpy as np def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ): for nxt, d in graph[v]: if nxt in visited_forward: continue a__ : Dict = cst_fwd.get(lowerCamelCase , np.inf ) a__ : Union[str, Any] = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) a__ : Union[str, Any] = new_cost_f a__ : Tuple = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: a__ : int = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : str = -1 a__ : Tuple = set() a__ : List[Any] = set() a__ : List[str] = {source: 0} a__ : Optional[int] = {destination: 0} a__ : Union[str, Any] = {source: None} a__ : Union[str, Any] = {destination: None} a__ : Optional[int] = PriorityQueue() a__ : int = PriorityQueue() a__ : Optional[Any] = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): a__ , a__ : List[str] = queue_forward.get() visited_forward.add(lowerCamelCase ) a__ , a__ : Optional[Any] = queue_backward.get() visited_backward.add(lowerCamelCase ) a__ : Optional[int] = pass_and_relaxation( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) a__ : Optional[Any] = pass_and_relaxation( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: a__ : Tuple = shortest_distance return shortest_path_distance SCREAMING_SNAKE_CASE__ : int = { """B""": [["""C""", 1]], """C""": [["""D""", 
1]], """D""": [["""F""", 1]], """E""": [["""B""", 1], ["""G""", 2]], """F""": [], """G""": [["""F""", 1]], } SCREAMING_SNAKE_CASE__ : Optional[int] = { """B""": [["""E""", 1]], """C""": [["""B""", 1]], """D""": [["""C""", 1]], """F""": [["""D""", 1], ["""G""", 1]], """E""": [[None, np.inf]], """G""": [["""E""", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
700
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


# Pillow 9.1 moved the resampling filters into the Image.Resampling enum;
# keep a version-independent lookup table from filter name to constant.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] (NCHW) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    # NCHW -> NHWC, on CPU, as float numpy for numpy_to_pil.
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(images)


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC, values in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        # Single image: add a batch dimension.
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # Special case for grayscale (single channel) images.
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
629
0
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[int] = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, 
"""image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def _A ( lowerCamelCase ): a__ : Optional[Any] = EfficientNetConfig() a__ : List[str] = CONFIG_MAP[model_name]["hidden_dim"] a__ : Optional[Any] = CONFIG_MAP[model_name]["width_coef"] a__ : List[str] = CONFIG_MAP[model_name]["depth_coef"] a__ : Optional[int] = CONFIG_MAP[model_name]["image_size"] a__ : int = CONFIG_MAP[model_name]["dropout_rate"] a__ : List[str] = CONFIG_MAP[model_name]["dw_padding"] a__ : int = "huggingface/label-files" a__ : List[str] = "imagenet-1k-id2label.json" a__ : Union[str, Any] = 1000 a__ : int = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) a__ : List[Any] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} a__ : List[str] = idalabel a__ : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def _A ( ): a__ : str = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : int = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def _A ( lowerCamelCase ): a__ : List[str] = CONFIG_MAP[model_name]["image_size"] a__ : Dict = EfficientNetImageProcessor( size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=_SCREAMING_SNAKE_CASE , ) return preprocessor def _A ( lowerCamelCase ): a__ : List[str] = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] a__ : str = sorted(set(_SCREAMING_SNAKE_CASE ) ) a__ : str = len(_SCREAMING_SNAKE_CASE ) a__ : Dict = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )} a__ : Optional[Any] = [] rename_keys.append(("stem_conv/kernel:0", 
"embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: a__ : Optional[int] = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) 
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) a__ : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: a__ : str = "efficientnet." 
+ item[1] a__ : List[str] = "classifier.weight" a__ : Union[str, Any] = "classifier.bias" return key_mapping def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): for key, value in tf_params.items(): if "normalization" in key: continue a__ : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: a__ : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: a__ : Any = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: a__ : Dict = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: a__ : Tuple = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : Dict = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE , weights="imagenet" , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation="softmax" , ) a__ : Union[str, Any] = original_model.trainable_variables a__ : List[Any] = original_model.non_trainable_variables a__ : Tuple = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: a__ : Optional[int] = param.numpy() a__ : Any = list(tf_params.keys() ) # Load HuggingFace model a__ : Optional[Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) a__ : List[str] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() a__ : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." 
) a__ : Any = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image a__ : Optional[Any] = convert_image_processor(_SCREAMING_SNAKE_CASE ) a__ : Dict = preprocessor(images=prepare_img() , return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): a__ : Tuple = hf_model(**_SCREAMING_SNAKE_CASE ) a__ : Any = outputs.logits.detach().numpy() # Original model inference a__ : List[Any] = False a__ : int = CONFIG_MAP[model_name]["image_size"] a__ : List[str] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) a__ : Optional[Any] = image.img_to_array(_SCREAMING_SNAKE_CASE ) a__ : Dict = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 ) a__ : int = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) a__ : List[Any] = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
701
# Lint as: python3
"""Utilities for naming datasets and the files they are stored in."""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a camel-case name to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case name to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    # Runs of underscores are preserved by re.split capture groups;
    # drop empties and capitalize each remaining fragment.
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    """Snake-case file prefix for a dataset name (must not be a path)."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """File prefix ``<dataset>-<split>`` after validating both parts."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Glob pattern matching every shard of a dataset split in ``data_dir``."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Concrete filenames for a dataset split, one per shard.

    With ``shard_lengths`` the names carry a ``-SSSSS-of-NNNNN`` shard index;
    otherwise a single unsharded filename is returned (always as a list).
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
629
0
import os


def solution() -> int:
    """Project Euler 22: total of all name scores in ``p022_names.txt``.

    A name's score is the sum of its letters' alphabetical values (A=1 .. Z=26)
    multiplied by the name's 1-based position in the alphabetically sorted list.
    """
    # The data file lives next to this script; it is a single line of
    # comma-separated, double-quoted names.
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = file.readlines()[0]
    names = names.replace('"', "").split(",")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        # ord("A") == 65, so subtracting 64 maps "A" -> 1 ... "Z" -> 26.
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
702
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it defines, consumed by _LazyModule.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; silently omit it when torch is unavailable.
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
629
0
def cramers_rule_2x2(equation1, equation2):
    """Solve two linear equations ``a*x + b*y = c`` using Cramer's rule.

    Each equation is a sequence ``(a, b, c)``.

    Returns:
        The unique solution ``(x, y)`` as floats, or ``(0.0, 0.0)`` for the
        trivial solution of a homogeneous degenerate system.

    Raises:
        ValueError: for malformed input, an inconsistent system, or a system
            with infinitely many solutions.
    """
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    # NOTE: fixed — the original checked equation1's coefficients twice
    # instead of checking both equations.
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")

    if determinant_x == determinant_y == 0:
        # Trivial solution (homogeneous system)
        return (0.0, 0.0)

    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial solution (consistent system)
    return (x, y)
703
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Map of submodule name -> public names it defines, consumed by _LazyModule.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; silently omit it when torch is unavailable.
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
629
0
def binary_multiply(a: int, b: int) -> int:
    """Multiply ``a`` by a non-negative ``b`` with the Russian-peasant
    (shift-and-add) algorithm: O(log b) additions, no ``*`` operator.
    """
    res = 0
    while b > 0:
        if b & 1:
            # Lowest bit of b is set: the current power-of-two multiple of a
            # contributes to the product.
            res += a
        a += a  # a *= 2
        b >>= 1  # b //= 2
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Return ``(a * b) % modulus`` via shift-and-add, reducing every
    addition mod ``modulus`` so intermediate sums stay small.
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
704
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


# Reference passage fed to the text-question-answering tool in every test below.
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        # Local tool (runs in-process) and its remote (inference-endpoint) twin.
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
629
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    """Configuration for the Donut Swin Transformer vision encoder.

    Defaults reproduce the ``naver-clova-ix/donut-base`` architecture.
    """

    model_type = "donut-swin"

    # Translate the generic PretrainedConfig attribute names to this model's.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
705
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    """Checks TRANSFORMERS_OFFLINE behavior by running small scripts in a
    subprocess whose socket layer is mocked to fail on network access."""

    @require_torch
    def test_offline_mode(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # Without a task argument, pipeline() must query the Hub, which is
        # forbidden offline — expect a clean failure with a helpful message.
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
629
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration for the Data2Vec vision model, including the optional
    semantic-segmentation decode/auxiliary heads.

    Defaults reproduce the ``facebook/data2vec-vision-base-ft`` architecture.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec vision."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single image input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when comparing ONNX outputs against PyTorch.
        return 1e-4
706
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Map of submodule name -> public names it defines, consumed by _LazyModule.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; silently omit it when torch is unavailable.
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
629
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map of submodule name -> public names it defines, consumed by _LazyModule.
_import_structure = {
    "configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch modeling code is optional.
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow modeling code is optional.
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
from PIL import Image


def change_brightness(img, level: float):
    """Return a copy of ``img`` with ``level`` added to every channel value.

    ``level`` must lie in [-255.0, 255.0]; ``Image.point`` clamps each
    resulting channel to the valid 0-255 range.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].
    """
    # Validate before doing any work on the image.
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    def brightness(c: int) -> float:
        # Equivalent to c + level, written relative to the mid-grey pivot.
        return 128 + level + (c - 128)

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
629
0
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build an UperNetConfig with a ConvNeXt backbone for ``model_name``.

    The backbone depth/width and the auxiliary-head input channels are chosen
    from the size keyword embedded in the checkpoint name.  Note "xlarge"
    also contains "large", so the "xlarge" branch deliberately runs last and
    overrides the "large" values.
    """
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information (ADE20k semantic segmentation, 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping mmsegmentation state-dict
    names to the HF UperNet/ConvNeXt naming scheme."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmsegmentation UperNet+ConvNeXt checkpoint, convert it to
    the HF format, verify its logits on a fixture image, then optionally save
    and/or push the converted model and processor."""
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    # Reference logit slices produced by the original mmsegmentation models.
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""Convert an original OpenAI Whisper checkpoint into a Hugging Face
WhisperForConditionalGeneration checkpoint."""
import argparse
import hashlib
import os
import urllib.request  # was `import urllib`: urllib.request is not imported implicitly
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


# Official OpenAI checkpoint URLs. The path segment just before the filename is
# the SHA256 checksum of the file; _download() uses it for integrity checks.
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    """Drop container-level keys that have no per-weight counterpart in HF (in place)."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


# Substring renames applied to every key when going OpenAI -> Hugging Face.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename every key of *s_dict* (in place) from OpenAI to HF naming.

    Returns the same dict for convenience.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Return an nn.Linear whose weight tensor is tied to the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    """Download *url* into *root* (reusing a cached copy if valid) and return the raw bytes.

    The SHA256 checksum embedded in the URL is verified both for a pre-existing
    cached file and after downloading.

    Raises:
        RuntimeError: if the target path is not a regular file, or if the
            downloaded file fails the checksum verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        # was hashlib.shaaaa (nonexistent); sha256 matches the URL-embedded checksum
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert the OpenAI checkpoint named/located at *checkpoint_path* and
    save the resulting HF model to *pytorch_dump_folder_path*."""
    if ".pt" not in checkpoint_path:
        # NOTE(review): _download returns raw bytes; this branch presumably still
        # needs a torch.load(io.BytesIO(...)) wrapper — confirm before relying on it.
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # keep a copy of the decoder embedding before renaming, for proj_out tying below
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # was dimensions["n_text_state"] — an embedding width, not a head count
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        # output projection shares the decoder token embedding
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
629
0
def _A ( lowerCamelCase , lowerCamelCase ): a__ : Optional[Any] = "" for i in table: res += inp[i - 1] return res def _A ( lowerCamelCase ): return data[1:] + data[0] def _A ( lowerCamelCase , lowerCamelCase ): a__ : Union[str, Any] = "" for i in range(len(lowerCamelCase ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def _A ( lowerCamelCase , lowerCamelCase ): a__ : List[str] = int("0b" + data[0] + data[-1] , 2 ) a__ : Any = int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : List[str] = message[:4] a__ : Dict = message[4:] a__ : Dict = apply_table(lowerCamelCase , lowerCamelCase ) a__ : int = xor(lowerCamelCase , lowerCamelCase ) a__ : Optional[int] = apply_sbox(lowerCamelCase , temp[:4] ) # noqa: E741 a__ : str = apply_sbox(lowerCamelCase , temp[4:] ) a__ : List[str] = "0" * (2 - len(lowerCamelCase )) + l # noqa: E741 a__ : Any = "0" * (2 - len(lowerCamelCase )) + r a__ : Union[str, Any] = apply_table(l + r , lowerCamelCase ) a__ : int = xor(lowerCamelCase , lowerCamelCase ) return temp + right if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = input("""Enter 10 bit key: """) SCREAMING_SNAKE_CASE__ : str = input("""Enter 8 bit message: """) SCREAMING_SNAKE_CASE__ : List[str] = [6, 3, 7, 4, 8, 5, 1_0, 9] SCREAMING_SNAKE_CASE__ : Tuple = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6] SCREAMING_SNAKE_CASE__ : Optional[Any] = [2, 4, 3, 1] SCREAMING_SNAKE_CASE__ : int = [2, 6, 3, 1, 4, 8, 5, 7] SCREAMING_SNAKE_CASE__ : int = [4, 1, 3, 5, 7, 2, 8, 6] SCREAMING_SNAKE_CASE__ : Dict = [4, 1, 2, 3, 2, 3, 4, 1] SCREAMING_SNAKE_CASE__ : Tuple = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] SCREAMING_SNAKE_CASE__ : str = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation SCREAMING_SNAKE_CASE__ : Dict = apply_table(key, paa_table) SCREAMING_SNAKE_CASE__ : str = temp[:5] SCREAMING_SNAKE_CASE__ : str = temp[5:] 
SCREAMING_SNAKE_CASE__ : Tuple = left_shift(left) SCREAMING_SNAKE_CASE__ : Union[str, Any] = left_shift(right) SCREAMING_SNAKE_CASE__ : Union[str, Any] = apply_table(left + right, pa_table) SCREAMING_SNAKE_CASE__ : Union[str, Any] = left_shift(left) SCREAMING_SNAKE_CASE__ : int = left_shift(right) SCREAMING_SNAKE_CASE__ : Any = left_shift(left) SCREAMING_SNAKE_CASE__ : List[str] = left_shift(right) SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_table(left + right, pa_table) # encryption SCREAMING_SNAKE_CASE__ : int = apply_table(message, IP) SCREAMING_SNAKE_CASE__ : Dict = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ : Dict = temp[4:] + temp[:4] SCREAMING_SNAKE_CASE__ : Dict = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ : Union[str, Any] = apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_table(CT, IP) SCREAMING_SNAKE_CASE__ : Union[str, Any] = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ : str = temp[4:] + temp[:4] SCREAMING_SNAKE_CASE__ : Tuple = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ : Tuple = apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
709
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for an Informer time-series forecasting model.

    Holds the probabilistic-forecasting options (distribution head, loss,
    lags, static/dynamic feature counts) plus the encoder/decoder transformer
    hyperparameters and the Informer-specific ProbSparse attention settings
    (`attention_type`, `sampling_factor`, `distil`).
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        # context window defaults to the prediction horizon when not given
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # heuristic default used across HF time-series models
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Width of the per-timestep feature vector fed to the transformer."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
629
0
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for video classification (shape/contract + tiny-model outputs)."""

    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a top-2 video classifier and example inputs (local file + URL)."""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        """Each example must yield exactly top_k=2 {score, label} dicts."""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        # batched input: one prediction list per video
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # no TF video-classification model available yet
        pass
710
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    """Tests for create_student_by_copying_alternating_layers (teacher -> student distillation init)."""

    @cached_property
    def teacher_config(self):
        # config of the tiny BART teacher, cached across tests
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # only checks that an asymmetric student can be built without error
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        # d=None keeps the full teacher decoder while shrinking the encoder
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # both e and d missing is invalid
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
629
0
"""F1 metric for the `datasets` library, backed by scikit-learn."""
from sklearn.metrics import f1_score  # was `fa_score` (nonexistent name)

import datasets


_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters \'macro\' to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'

_CITATION = '\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    """Wraps sklearn.metrics.f1_score as a `datasets` Metric."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # multilabel configs take one label sequence per example
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        # f1_score returns a 0-d array for scalar averages, an array for average=None
        return {"f1": float(score) if score.size == 1 else score}
711
from typing import Any, Callable, Dict, List, Optional, Union

import torch

from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Runs one prompt through Stable Diffusion v1.1–v1.4 and returns the four
    resulting images for side-by-side comparison.

    The v1.1–v1.3 sub-pipelines are downloaded from the Hub; the v1.4 pipeline
    is assembled from the components handed to the constructor.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()  # was `super()._init_()` — nonexistent attribute

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        """All registered sub-modules (config keys that are not private)."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute attention in one step)."""
        self.enable_attention_slicing(None)

    @staticmethod
    def _gen_kwargs(
        prompt,
        height,
        width,
        num_inference_steps,
        guidance_scale,
        negative_prompt,
        num_images_per_prompt,
        eta,
        generator,
        latents,
        output_type,
        return_dict,
        callback,
        callback_steps,
    ):
        # Shared keyword bundle forwarded unchanged to every sub-pipeline.
        return dict(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )

    @torch.no_grad()
    def text2img_sd1_1(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run the prompt through the SD v1.1 checkpoint only."""
        return self.pipe1(
            **self._gen_kwargs(
                prompt, height, width, num_inference_steps, guidance_scale, negative_prompt,
                num_images_per_prompt, eta, generator, latents, output_type, return_dict,
                callback, callback_steps,
            ),
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run the prompt through the SD v1.2 checkpoint only."""
        return self.pipe2(
            **self._gen_kwargs(
                prompt, height, width, num_inference_steps, guidance_scale, negative_prompt,
                num_images_per_prompt, eta, generator, latents, output_type, return_dict,
                callback, callback_steps,
            ),
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run the prompt through the SD v1.3 checkpoint only."""
        return self.pipe3(
            **self._gen_kwargs(
                prompt, height, width, num_inference_steps, guidance_scale, negative_prompt,
                num_images_per_prompt, eta, generator, latents, output_type, return_dict,
                callback, callback_steps,
            ),
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run the prompt through the SD v1.4 checkpoint only."""
        return self.pipe4(
            **self._gen_kwargs(
                prompt, height, width, num_inference_steps, guidance_scale, negative_prompt,
                num_images_per_prompt, eta, generator, latents, output_type, return_dict,
                callback, callback_steps,
            ),
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate one image per checkpoint (v1.1–v1.4) for the same prompt.

        Returns a StableDiffusionPipelineOutput whose images list holds the
        first image of each sub-pipeline, in checkpoint order.
        """
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        common = self._gen_kwargs(
            prompt, height, width, num_inference_steps, guidance_scale, negative_prompt,
            num_images_per_prompt, eta, generator, latents, output_type, return_dict,
            callback, callback_steps,
        )

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(**common, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(**common, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(**common, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(**common, **kwargs)

        # Get all result images into a single list and pass it via
        # StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
629
0
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    """Return ``i + 1``; defined at module level so worker processes can pickle it."""
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_splitting():
    """`parallel_backend` records the active backend name and rejects unknown ones."""
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    # An unsupported backend must raise no matter what num_proc is.
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    """`map_nested` under the spark backend maps over lists, dicts and nested dicts."""
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
712
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665 def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ): if fluid_density <= 0: raise ValueError("Impossible fluid density" ) if volume < 0: raise ValueError("Impossible Object volume" ) if gravity <= 0: raise ValueError("Impossible Gravity" ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
629
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __lowerCAmelCase ( unittest.TestCase ): def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : str = tempfile.mkdtemp() a__ : Dict = BlipImageProcessor() a__ : Optional[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) a__ : Optional[Any] = BlipProcessor(snake_case , snake_case ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self , **snake_case ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).tokenizer def _snake_case ( self , **snake_case ) -> Optional[int]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor def _snake_case ( self ) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _snake_case ( self ) -> int: """simple docstring""" a__ : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a__ : Union[str, Any] = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Optional[Any] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a__ : str = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) a__ : str = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 ) a__ : List[Any] = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , 
tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : Union[str, Any] = self.get_image_processor() a__ : Optional[int] = self.get_tokenizer() a__ : List[str] = BlipProcessor(tokenizer=snake_case , image_processor=snake_case ) a__ : int = self.prepare_image_inputs() a__ : Any = image_processor(snake_case , return_tensors="np" ) a__ : str = processor(images=snake_case , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _snake_case ( self ) -> str: """simple docstring""" a__ : Union[str, Any] = self.get_image_processor() a__ : List[str] = self.get_tokenizer() a__ : str = BlipProcessor(tokenizer=snake_case , image_processor=snake_case ) a__ : int = "lower newer" a__ : Dict = processor(text=snake_case ) a__ : Dict = tokenizer(snake_case , return_token_type_ids=snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case ( self ) -> str: """simple docstring""" a__ : Optional[int] = self.get_image_processor() a__ : Optional[Any] = self.get_tokenizer() a__ : str = BlipProcessor(tokenizer=snake_case , image_processor=snake_case ) a__ : Optional[Any] = "lower newer" a__ : List[Any] = self.prepare_image_inputs() a__ : str = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Optional[int] = self.get_image_processor() a__ : Dict = self.get_tokenizer() a__ : Optional[int] = 
BlipProcessor(tokenizer=snake_case , image_processor=snake_case ) a__ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a__ : str = processor.batch_decode(snake_case ) a__ : Optional[Any] = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def _snake_case ( self ) -> Dict: """simple docstring""" a__ : Union[str, Any] = self.get_image_processor() a__ : str = self.get_tokenizer() a__ : int = BlipProcessor(tokenizer=snake_case , image_processor=snake_case ) a__ : Optional[Any] = "lower newer" a__ : List[str] = self.prepare_image_inputs() a__ : Any = processor(text=snake_case , images=snake_case ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
713
from __future__ import annotations

from random import random


class Node:
    """Treap node: a BST key (`value`) plus a random heap priority (`prior`)."""

    def __init__(self, value: int | None = None) -> None:
        self.value = value
        # Random priority; `merge` keeps the node with the smaller priority on top.
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split `root` into two treaps: (keys <= value, keys > value)."""
    if root is None:  # an empty tree splits into two empty trees
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root (and its right subtree) belong to the right part.
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every key in `left` must be <= every key in `right`."""
    if (not left) or (not right):  # if one side is empty, the other is the result
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` and return the new treap root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove every node whose key equals `value`; return the new root."""
    left, right = split(root, value - 1)  # left: keys <= value - 1
    _, right = split(right, value)  # drop the middle part (keys == value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the keys in sorted order, comma-separated."""
    if not root:  # None tree
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply whitespace-separated commands ('+N' inserts N, '-N' erases N) to `root`."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive loop: read command lines until the user enters 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
629
0
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): _UpperCamelCase : Tuple = MODEL_FOR_MASKED_LM_MAPPING _UpperCamelCase : Any = TF_MODEL_FOR_MASKED_LM_MAPPING def _snake_case ( self ) -> int: """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : List[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" ) a__ : List[Any] = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(__A , decimals=6 ) , [ {"sequence": "My name is grouped", "score": 2.1E-05, "token": 38_015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1E-05, "token": 25_506, "token_str": " accuser"}, ] , ) a__ : Optional[int] = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(__A , decimals=6 ) , [ { "sequence": "The largest city in France is grouped", "score": 2.1E-05, "token": 38_015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1E-05, "token": 25_506, "token_str": " accuser", }, ] , ) a__ : str = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(__A , decimals=6 ) , [ {"sequence": "My name is Clara", "score": 2E-05, "token": 13_606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2E-05, "token": 3_499, "token_str": " Patrick"}, {"sequence": "My name is Te", 
"score": 1.9E-05, "token": 2_941, "token_str": " Te"}, ] , ) @require_torch def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : List[str] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" ) a__ : str = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(__A , decimals=6 ) , [ {"sequence": "My name is Maul", "score": 2.2E-05, "token": 35_676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2E-05, "token": 16_416, "token_str": "ELS"}, ] , ) a__ : Dict = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(__A , decimals=6 ) , [ { "sequence": "The largest city in France is Maul", "score": 2.2E-05, "token": 35_676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2E-05, "token": 16_416, "token_str": "ELS"}, ] , ) a__ : str = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(__A , decimals=6 ) , [ {"sequence": "My name is Patrick", "score": 2.1E-05, "token": 3_499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2E-05, "token": 2_941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2E-05, "token": 13_606, "token_str": " Clara"}, ] , ) a__ : str = unmasker("My name is <mask> <mask>" , top_k=2 ) self.assertEqual( nested_simplify(__A , decimals=6 ) , [ [ { "score": 2.2E-05, "token": 35_676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2E-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2E-05, "token": 35_676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2E-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ] , ) @require_torch_gpu def _snake_case ( self ) -> Dict: """simple docstring""" a__ : str = pipeline("fill-mask" , 
model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" ) # convert model to fp16 pipe.model.half() a__ : str = pipe("Paris is the [MASK] of France." ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__A , __A ) @slow @require_torch def _snake_case ( self ) -> Any: """simple docstring""" a__ : List[Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" ) self.run_large_test(__A ) @slow @require_tf def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Tuple = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" ) self.run_large_test(__A ) def _snake_case ( self , snake_case ) -> Any: """simple docstring""" a__ : Union[str, Any] = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(__A ) , [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1_573, "token_str": " Chris"}, ] , ) a__ : List[str] = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(__A ) , [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2_201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 12_790, "token_str": " Lyon", }, ] , ) a__ : Tuple = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(__A ) , [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3_499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 13_606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2_941, "token_str": " Te"}, ] , ) @require_torch def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Union[str, Any] = 
pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" ) a__ : List[str] = None a__ : str = None self.run_pipeline_test(__A , [] ) @require_tf def _snake_case ( self ) -> int: """simple docstring""" a__ : List[str] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" ) a__ : Optional[int] = None a__ : Union[str, Any] = None self.run_pipeline_test(__A , [] ) def _snake_case ( self , snake_case , snake_case , snake_case ) -> Tuple: """simple docstring""" if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" ) a__ : str = FillMaskPipeline(model=__A , tokenizer=__A ) a__ : Any = [ F"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def _snake_case ( self , snake_case , snake_case ) -> Tuple: """simple docstring""" a__ : List[Any] = fill_masker.tokenizer a__ : Union[str, Any] = fill_masker.model a__ : Any = fill_masker( F"""This is a {tokenizer.mask_token}""" , ) self.assertEqual( __A , [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ] , ) a__ : Tuple = fill_masker([F"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( __A , [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, 
{"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ] , ) a__ : str = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( __A , [ [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ], [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ], ] , ) with self.assertRaises(__A ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__A ): fill_masker("This is" ) self.run_test_top_k(__A , __A ) self.run_test_targets(__A , __A ) self.run_test_top_k_targets(__A , __A ) self.fill_mask_with_duplicate_targets_and_top_k(__A , __A ) self.fill_mask_with_multiple_masks(__A , __A ) def _snake_case ( self , snake_case , snake_case ) -> str: """simple docstring""" a__ : str = tokenizer.get_vocab() a__ : int = sorted(vocab.keys() )[:2] # Pipeline argument a__ : Union[str, Any] = FillMaskPipeline(model=__A , tokenizer=__A , targets=__A ) a__ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __A , [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": 
ANY(__A ), "token_str": ANY(__A )}, ] , ) a__ : Dict = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , __A ) a__ : Dict = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(__A ) ) # Call argument a__ : Dict = FillMaskPipeline(model=__A , tokenizer=__A ) a__ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__A ) self.assertEqual( __A , [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ] , ) a__ : Any = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , __A ) a__ : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(__A ) ) # Score equivalence a__ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__A ) a__ : Any = [top_mask["token_str"] for top_mask in outputs] a__ : List[Any] = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__A ) == set(__A ): a__ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=__A ) a__ : Union[str, Any] = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__A ) , nested_simplify(__A ) ) # Raises with invalid with self.assertRaises(__A ): a__ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__A ): a__ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[""] ) with self.assertRaises(__A ): a__ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets="" ) def _snake_case ( self , snake_case , snake_case ) -> str: """simple docstring""" a__ : Union[str, Any] = FillMaskPipeline(model=__A , tokenizer=__A , top_k=2 ) a__ : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __A , [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ] , ) a__ : Union[str, Any] = FillMaskPipeline(model=__A , tokenizer=__A ) a__ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( __A , [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ] , ) self.assertEqual(nested_simplify(__A ) , nested_simplify(__A ) ) def _snake_case ( self , snake_case , snake_case ) -> Tuple: """simple docstring""" a__ : Any = tokenizer.get_vocab() a__ : List[Any] = FillMaskPipeline(model=__A , tokenizer=__A ) # top_k=2, ntargets=3 a__ : str = sorted(vocab.keys() )[:3] a__ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=__A ) # If we use the most probably targets, and filter 
differently, we should still # have the same results a__ : List[Any] = [el["token_str"] for el in sorted(__A , key=lambda snake_case : x["score"] , reverse=__A )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__A ).issubset(__A ): a__ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=__A ) # They should yield exactly the same result self.assertEqual(nested_simplify(__A ) , nested_simplify(__A ) ) def _snake_case ( self , snake_case , snake_case ) -> Optional[Any]: """simple docstring""" a__ : Tuple = FillMaskPipeline(model=__A , tokenizer=__A ) a__ : Optional[Any] = tokenizer.get_vocab() # String duplicates + id duplicates a__ : Union[str, Any] = sorted(vocab.keys() )[:3] a__ : Optional[int] = [targets[0], targets[1], targets[0], targets[2], targets[1]] a__ : Union[str, Any] = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=__A , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__A ) , 3 ) def _snake_case ( self , snake_case , snake_case ) -> Union[str, Any]: """simple docstring""" a__ : Union[str, Any] = FillMaskPipeline(model=__A , tokenizer=__A ) a__ : List[str] = fill_masker( F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( __A , [ [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ], [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ], [ {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, {"sequence": ANY(__A ), "score": ANY(__A ), "token": ANY(__A ), "token_str": ANY(__A )}, ], ] , )
714
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ): _UpperCamelCase : Optional[int] = StableUnCLIPPipeline _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _UpperCamelCase : Any = False def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Any = 32 a__ : int = embedder_hidden_size # prior components torch.manual_seed(0 ) a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) a__ : Optional[Any] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) a__ : int = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , ) torch.manual_seed(0 ) a__ : str = DDPMScheduler( 
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case ) a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) a__ : Union[str, Any] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) a__ : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , ) torch.manual_seed(0 ) a__ : Tuple = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , ) torch.manual_seed(0 ) a__ : Optional[int] = AutoencoderKL() a__ : Any = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def _snake_case ( self , snake_case , 
snake_case=0 ) -> Dict: """simple docstring""" if str(snake_case ).startswith("mps" ): a__ : Union[str, Any] = torch.manual_seed(snake_case ) else: a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case ) a__ : Any = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Dict = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=snake_case ) def _snake_case ( self ) -> int: """simple docstring""" a__ : int = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=snake_case ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def _snake_case ( self ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 ) a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" ) a__ : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case , snake_case ) def _snake_case ( self ) -> Tuple: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a__ : str = 
StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) a__ : Union[str, Any] = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a__ : Union[str, Any] = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) a__ : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
629
0
from collections import deque

# Small undirected demo graph used by the __main__ section below.
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
# Keep the name the dump bound the same constant to, for compatibility.
SCREAMING_SNAKE_CASE__ = demo_graph


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return one shortest path from `start` to `goal` as a list of nodes.

    Returns `[start]` when start == goal, and `[]` when no path exists.
    """
    # keep track of all visited nodes
    explored = set()
    # queue of partial paths still to be extended; deque gives O(1) pops
    queue = deque([[start]])
    if start == goal:
        return [start]
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node not in explored:
            # Extend the path by each neighbour and enqueue the new paths.
            for neighbour in graph[node]:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # BFS order guarantees the first path reaching `goal` is shortest.
                if neighbour == goal:
                    return new_path
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest start->target path.

    Returns -1 when either endpoint is missing from `graph` or target is
    unreachable, and 0 when start == target.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = deque([start])
    visited = {start}  # fixed: the original `set(start)` iterated characters
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.popleft()
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
715
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Import structure consumed by _LazyModule below: maps submodule name -> list of
# public names it exports.  Identifier mangling had collapsed every assignment
# into rebinding a single throwaway name, leaving `_import_structure` undefined
# at the _LazyModule call; restored here.  Optional backends (tokenizers /
# torch / tf / flax) are only registered when their dependency is importable.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see direct imports; at runtime the lazy module
    # resolves the same names on first attribute access instead.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Install a lazy proxy module so heavy backends are imported on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
629
0
# Regression script for diffusers UNet checkpoints: for each matching model on
# the Hub, run one forward pass on fixed-seed noise and compare the first 30
# output logits against the precomputed reference rows below.
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel


# NOTE(review): every module-level name in this file was flattened to
# `SCREAMING_SNAKE_CASE__` by an identifier-mangling pass, so each assignment
# overwrites the previous one, and the names referenced later in the loop
# (`api`, `results`, `models`, `local_checkpoint`, `model`, `noise`,
# `time_step`, `logits`) are undefined here.  The reference tensors below were
# originally entries of a `results` dict keyed by model id; the keys are not
# recoverable from this copy — restore them from the upstream script.
SCREAMING_SNAKE_CASE__ : List[str] = HfApi()  # originally `api`
SCREAMING_SNAKE_CASE__ : str = {}  # originally `results` (model id -> expected logits row)
# fmt: off
# Each tensor is one expected row of 30 logits for a specific checkpoint.
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([
    -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485,
    0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040,
    0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([
    -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429,
    -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033,
    1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
SCREAMING_SNAKE_CASE__ : str = torch.tensor([
    -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513,
    -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025,
    -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([
    0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327,
    -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748,
    -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([
    0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415,
    -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514,
    -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([
    0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317,
    -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693,
    -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([
    0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385,
    -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533,
    -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([
    0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267,
    -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738,
    -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([
    -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228,
    0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026,
    1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([
    -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917,
    0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163,
    0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([
    -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811,
    0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131,
    0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([
    -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319,
    0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390,
    1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([
    -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630,
    -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262,
    1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ])
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([
    -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817,
    0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632,
    1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([
    -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397,
    0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921,
    1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
SCREAMING_SNAKE_CASE__ : List[Any] = api.list_models(filter="""diffusers""")  # originally `models`
for mod in models:
    # Only Google-authored checkpoints plus the CompVis LDM CelebA-HQ model are checked.
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        # Checkpoints are read from a local mirror rather than downloaded.
        SCREAMING_SNAKE_CASE__ : List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split("""/""")[-1]
        print(f'Started running {mod.modelId}!!!')
        if mod.modelId.startswith("""CompVis"""):
            # LDM repos keep the UNet in a `unet` subfolder.
            SCREAMING_SNAKE_CASE__ : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
        else:
            SCREAMING_SNAKE_CASE__ : str = UNetaDModel.from_pretrained(local_checkpoint)
        # Fixed seeds so the generated noise (and hence the logits) are reproducible.
        torch.manual_seed(0)
        random.seed(0)
        SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([1_0] * noise.shape[0])
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__ : Any = model(noise, time_step).sample
        # Compare the first 30 logits against the stored reference row; the dict key
        # is the model id with '/' and '-' replaced by '_'.
        assert torch.allclose(
            logits[0, 0, 0, :3_0], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3
        )
        print(f'{mod.modelId} has passed successfully!!!')
716
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by Monte Carlo sampling of points in [-1, 1] x [-1, 1].

    Counts the fraction of points landing inside the unit circle and prints
    the estimate, the reference value of pi, and the absolute error.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of ``function_to_integrate``
    over [min_value, max_value]: mean sampled value times interval length.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator on y = x, whose exact integral is
    (max_value^2 - min_value^2) / 2; prints estimate, expectation and error.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under sqrt(4 - x^2) on [0, 2]
    (a quarter circle of radius 2 has area pi); prints the comparison.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
629
0
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A real-valued vector backed by a plain list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        """Create a vector from an iterable of components (empty when omitted)."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Return the number of components."""
        return len(self.__components)

    def __str__(self) -> str:
        """Render as ``(c1,c2,...)``."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication (returns a Vector) or dot product (returns a float)."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return a shallow copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component (negative indices allowed, list-style)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set the component at ``pos`` to ``value``."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the 2-norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Angle between self and other, in radians (degrees when deg=True)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return a vector of ``dimension`` zeros."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the standard basis vector with a 1 at index ``pos``."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute ``scalar * x + y`` (the BLAS 'axpy' operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of ``n`` random integers drawn from [a, b]."""
    # NOTE(review): the original seed argument was lost to identifier mangling;
    # seeding with None (system entropy) matches the upstream library — confirm.
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A real matrix of width ``w`` and height ``h`` stored as a list of rows."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Render rows as ``|a,b,...|`` lines, one per row."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product or scalar multiplication."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        """Return the number of rows."""
        return self.__height

    def width(self) -> int:
        """Return the number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return the entry at row ``x``, column ``y``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set the entry at row ``x``, column ``y``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Determinant of the submatrix obtained by deleting row x and column y."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Signed minor: (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return an n x n matrix of zeros."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a height x width matrix of random integers drawn from [a, b]."""
    # NOTE(review): original seed argument lost to mangling; see random_vector.
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
717
# Conversion script: EfficientFormer original PyTorch checkpoint -> transformers
# format, with a logits regression check against known-good values.
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


# NOTE(review): identifier mangling has left all four functions named `_A` with
# duplicated parameter names (`lowerCamelCase, lowerCamelCase` — a SyntaxError)
# and assignment targets flattened to `a__`, while bodies still read the
# original names (old_name, new_name, trimmed_name, match, checkpoint, val,
# config, model, model_name, processor, logits, ...).  Restore names from the
# upstream transformers script before use.
def _A(lowerCamelCase, lowerCamelCase):
    # Maps one original state-dict key (`old_name`) to its transformers name.
    a__ : Dict = old_name
    if "patch_embed" in old_name:
        # patch_embed.<layer>.<param>: conv/batchnorm pairs renamed positionally.
        a__ , a__ , a__ : Union[str, Any] = old_name.split(".")
        if layer == "0":
            a__ : Union[str, Any] = old_name.replace("0", "convolution1")
        elif layer == "1":
            a__ : Dict = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            a__ : List[str] = old_name.replace("3", "convolution2")
        else:
            a__ : Optional[Any] = old_name.replace("4", "batchnorm_after")
    if "network" in old_name and re.search(r"\d\.\d", lowerCamelCase):
        # network.<stage>.<block>...: split meta4D (intermediate) from meta3D (last) blocks.
        a__ : List[str] = r"\b\d{2}\b"
        if bool(re.search(lowerCamelCase, lowerCamelCase)):
            a__ : Optional[int] = re.search(r"\d\.\d\d.", lowerCamelCase).group()
        else:
            a__ : Any = re.search(r"\d\.\d.", lowerCamelCase).group()
        if int(match[0]) < 6:
            a__ : List[Any] = old_name.replace(lowerCamelCase, "")
            a__ : int = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            a__ : List[Any] = "intermediate_stages." + trimmed_name
        else:
            a__ : Union[str, Any] = old_name.replace(lowerCamelCase, "")
            if int(match[2]) < num_meta4D_last_stage:
                a__ : Optional[Any] = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                # meta3D blocks use layernorm/linear naming instead of batchnorm/conv.
                a__ : Union[str, Any] = str(int(match[2]) - num_meta4D_last_stage)
                a__ : str = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    a__ : List[str] = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    a__ : Optional[int] = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    a__ : List[str] = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    a__ : Any = trimmed_name.replace("fc2", "linear_out")
            a__ : Any = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", lowerCamelCase):
        a__ : List[str] = old_name.replace("network", "intermediate_stages")
    # Remaining global renames applied to the partially-converted name.
    if "fc" in new_name:
        a__ : str = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        a__ : str = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        a__ : Any = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        a__ : Optional[int] = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        a__ : Tuple = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        a__ : Optional[int] = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        a__ : Tuple = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        a__ : Union[str, Any] = new_name.replace("norm", "layernorm")
        a__ : Optional[int] = "efficientformer." + new_name
    else:
        a__ : List[Any] = "efficientformer.encoder." + new_name
    return new_name


def _A(lowerCamelCase, lowerCamelCase):
    # Re-keys the whole state dict in place using the rename function above.
    for key in checkpoint.copy().keys():
        a__ : Optional[Any] = checkpoint.pop(lowerCamelCase)
        # NOTE(review): the assignment target (originally
        # `checkpoint[rename_key(key, num_meta4D_last_stage)] = val`) was lost.
        a__ : Dict = val
    return checkpoint


def _A():
    # Fetches the standard COCO test image used for logits verification.
    a__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
    a__ : List[Any] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw)
    return image


def _A(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
    # (checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub)
    a__ : List[str] = torch.load(lowerCamelCase, map_location="cpu")["model"]
    a__ : str = EfficientFormerConfig.from_json_file(lowerCamelCase)
    a__ : int = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase)
    # Model variant (l1/l3/l7) parsed from the checkpoint filename.
    a__ : Optional[Any] = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    # Number of meta4D blocks in the last stage (rest are meta3D).
    # NOTE(review): `num_metaad_blocks` looks like a mangled `num_meta3d_blocks` — confirm.
    a__ : Tuple = config.depths[-1] - config.num_metaad_blocks + 1
    a__ : Union[str, Any] = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase)
    model.load_state_dict(lowerCamelCase)
    model.eval()
    a__ : Dict = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    a__ : str = prepare_img()
    a__ : Dict = 256
    a__ : Union[str, Any] = 224
    a__ : List[str] = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    a__ : List[str] = processor(images=lowerCamelCase, return_tensors="pt").pixel_values
    # original processing pipeline
    a__ : List[str] = Compose(
        [
            Resize(lowerCamelCase, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(lowerCamelCase),
            ToTensor(),
            Normalize(lowerCamelCase, lowerCamelCase),
        ]
    )
    a__ : List[Any] = image_transforms(lowerCamelCase).unsqueeze(0)
    # New processor must reproduce the original torchvision pipeline exactly.
    assert torch.allclose(lowerCamelCase, lowerCamelCase)
    a__ : Optional[int] = model(lowerCamelCase)
    a__ : Any = outputs.logits
    a__ : Optional[Any] = (1, 1000)
    # Per-variant regression values for the first 10 ImageNet logits.
    if "l1" in model_name:
        a__ : Tuple = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], lowerCamelCase, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        a__ : int = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], lowerCamelCase, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        a__ : Optional[Any] = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )
    # Save Checkpoints
    Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
    model.save_pretrained(lowerCamelCase)
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(lowerCamelCase)
    print(F"""Processor successfuly saved at {pytorch_dump_path}""")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add model",
            use_temp_dir=lowerCamelCase,
        )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add image processor",
            use_temp_dir=lowerCamelCase,
        )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Pushing to the hub is opt-out via --no-push_to_hub.
    parser.set_defaults(push_to_hub=True)
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
629
0
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from utf-8 byte values to printable unicode characters.

    The reversible BPE codes work on unicode strings, so every possible byte
    value needs a unicode representation that is not whitespace or a control
    character (those break the BPE merge files). Printable bytes map to
    themselves; the remaining bytes are shifted into the 256+ range.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    """
    Byte-level BPE tokenizer for LED (same vocabulary handling as BART/GPT-2),
    with an LED-specific `_pad` that also pads `global_attention_mask`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line of a merges file is a "#version" header; last split is empty.
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single pre-tokenized token; result is cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Always merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab; unknown tokens map to `unk_token`."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back to a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write `vocab.json` and `merges.txt` into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Build `<s> A </s>` or `<s> A </s></s> B </s>` (RoBERTa-style pair format)."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a, token_ids_a_a=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """LED (like RoBERTa) does not use token type ids — always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE-encoded like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad like the base class, then bring `global_attention_mask` to the same length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
718
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" LXMERT tokenizer backed by the `tokenizers` library (WordPiece,
    BERT-style special tokens and normalization).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it so `do_lower_case` / `strip_accents` / chinese-char handling
        # actually take effect.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Return BERT-style segment ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
629
0
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    """Integration tests for the hub file-caching helpers (require network access)."""

    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        # A ".no_exist" marker is written so a second lookup does not hit the network.
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        # NOTE(review): the exact weight-name constants restored here are inferred
        # from the module imports — the repo only ships PyTorch weights.
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None for a missing file instead of raising.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
719
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    """
    Configuration for a MobileNetV2 model. Stores architecture hyper-parameters;
    all arguments have defaults matching `google/mobilenet_v2_1.0_224`.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
629
0
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class __lowerCAmelCase ( _UpperCamelCase ): def _snake_case ( self ) -> Dict: """simple docstring""" a__ : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A__ , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(A__ , "num_attention_heads" ) ) class __lowerCAmelCase : def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=3 , snake_case=2 , snake_case=1 , snake_case=16 , snake_case=[128, 256, 384] , snake_case=[4, 6, 8] , snake_case=[2, 3, 4] , snake_case=[16, 16, 16] , snake_case=0 , snake_case=[2, 2, 2] , snake_case=[2, 2, 2] , snake_case=0.02 , snake_case=True , snake_case=True , snake_case=2 , ) -> str: """simple docstring""" a__ : Dict = parent a__ : str = batch_size a__ : Dict = image_size a__ : Optional[int] = num_channels a__ : List[str] = kernel_size a__ : List[str] = stride a__ : Union[str, Any] = padding a__ : int = hidden_sizes a__ : Union[str, Any] = num_attention_heads a__ : Union[str, Any] = depths a__ : List[str] = key_dim a__ : Any = drop_path_rate a__ : List[Any] = patch_size a__ : 
Optional[int] = attention_ratio a__ : Optional[Any] = mlp_ratio a__ : List[Any] = initializer_range a__ : List[Any] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] a__ : int = is_training a__ : List[str] = use_labels a__ : Tuple = num_labels a__ : int = initializer_range def _snake_case ( self ) -> Any: """simple docstring""" a__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Any = None if self.use_labels: a__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) a__ : str = self.get_config() return config, pixel_values, labels def _snake_case ( self ) -> Tuple: """simple docstring""" return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _snake_case ( self , snake_case , snake_case , snake_case ) -> Optional[int]: """simple docstring""" a__ : Optional[int] = LevitModel(config=A__ ) model.to(A__ ) model.eval() a__ : Tuple = model(A__ ) a__ : str = (self.image_size, self.image_size) a__ : Any = image_size[0], image_size[1] for _ in range(4 ): a__ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) a__ : Union[str, Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _snake_case ( self , snake_case , snake_case , snake_case ) -> Tuple: """simple docstring""" a__ : Optional[Any] = 
self.num_labels a__ : Any = LevitForImageClassification(A__ ) model.to(A__ ) model.eval() a__ : List[Any] = model(A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self ) -> Dict: """simple docstring""" a__ : Tuple = self.prepare_config_and_inputs() a__ : List[Any] = config_and_inputs a__ : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ): _UpperCamelCase : Union[str, Any] = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) _UpperCamelCase : Union[str, Any] = ( { '''feature-extraction''': LevitModel, '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) _UpperCamelCase : List[str] = False _UpperCamelCase : List[Any] = False _UpperCamelCase : int = False _UpperCamelCase : Optional[int] = False _UpperCamelCase : Dict = False def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Dict = LevitModelTester(self ) a__ : Optional[int] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self ) -> int: """simple docstring""" return @unittest.skip(reason="Levit does not use inputs_embeds" ) def _snake_case ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="Levit does not support input and output 
embeddings" ) def _snake_case ( self ) -> int: """simple docstring""" pass @unittest.skip(reason="Levit does not output attentions" ) def _snake_case ( self ) -> str: """simple docstring""" pass def _snake_case ( self ) -> str: """simple docstring""" a__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : str = model_class(A__ ) a__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : str = [*signature.parameters.keys()] a__ : Optional[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A__ ) def _snake_case ( self ) -> List[str]: """simple docstring""" def check_hidden_states_output(snake_case , snake_case , snake_case ): a__ : str = model_class(A__ ) model.to(A__ ) model.eval() with torch.no_grad(): a__ : List[Any] = model(**self._prepare_for_class(A__ , A__ ) ) a__ : List[Any] = outputs.hidden_states a__ : int = len(self.model_tester.depths ) + 1 self.assertEqual(len(A__ ) , A__ ) a__ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size) a__ : List[str] = image_size[0], image_size[1] for _ in range(4 ): a__ : str = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) a__ : List[str] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Union[str, Any] = True check_hidden_states_output(A__ , A__ , A__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a__ : Dict = True check_hidden_states_output(A__ , A__ , A__ ) @unittest.skip("Will be 
fixed soon by reducing the size of the model used for common tests." ) def _snake_case ( self ) -> str: """simple docstring""" pass def _snake_case ( self , snake_case , snake_case , snake_case=False ) -> Tuple: """simple docstring""" a__ : Tuple = super()._prepare_for_class(A__ , A__ , return_labels=A__ ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) def _snake_case ( self ) -> str: """simple docstring""" if not self.model_tester.is_training: return a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(A__ ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue a__ : Dict = model_class(A__ ) model.to(A__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ ) a__ : List[Any] = model(**A__ ).loss loss.backward() def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : Dict = False a__ : Any = True for model_class in self.all_model_classes: if model_class in get_values(A__ ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue a__ : List[Any] = model_class(A__ ) model.gradient_checkpointing_enable() 
model.to(A__ ) model.train() a__ : Optional[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ ) a__ : Any = model(**A__ ).loss loss.backward() def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() a__ : Union[str, Any] = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(A__ ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): a__ : Any = problem_type["""title"""] a__ : Any = problem_type["""num_labels"""] a__ : List[Any] = model_class(A__ ) model.to(A__ ) model.train() a__ : Optional[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ ) if problem_type["num_labels"] > 1: a__ : Any = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) a__ : Dict = inputs["""labels"""].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=A__ ) as warning_list: a__ : Dict = model(**A__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def _snake_case ( self ) -> List[Any]: """simple docstring""" for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = LevitModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) def _A ( ): a__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> List[Any]: """simple docstring""" return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( A__ ) a__ : Optional[int] = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Dict = image_processor(images=A__ , return_tensors="pt" ).to(A__ ) # forward pass with torch.no_grad(): a__ : Optional[int] = model(**A__ ) # verify the logits a__ : List[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , A__ ) a__ : Any = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(A__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4 ) )
720
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Recursively collect the shape of every tensor in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for a tensor of shape ``dims``."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return the minimal set of (inclusive) slice tuples covering the flat range start..end.

    start_edges and end_edges both indicate whether, starting from any given
    dimension, the start/end index is at the top/bottom edge of the
    corresponding tensor, modeled as a tree.
    """

    def reduce_edge_list(l: List[bool]) -> None:
        # An index is only "at the edge" if every less-significant index is too.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        # Everything below the start index, down to the bottom edge of the subtree.
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        # Everything above the top edge of the subtree, up to the end index.
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice the flat range [flat_start, flat_end) out of the leading batch dims of ``t``
    without first flattening those dims (saves a reshape/copy in low-memory mode)."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Apply ``layer`` to ``inputs`` in chunks of ``chunk_size`` along the flattened
    leading ``no_batch_dims`` batch dimensions, reassembling the outputs.

    Args:
        layer: callable invoked as ``layer(**chunked_inputs)``.
        inputs: (possibly nested) dict of tensors; leading dims are broadcast.
        chunk_size: number of flat-batch elements per call.
        no_batch_dims: how many leading dims count as batch dims.
        low_mem: avoid materializing expanded/flattened inputs up front.
        _out: optional pre-allocated output tree to write into.
        _add_into_out: accumulate (+=) into the output instead of overwriting.

    Returns:
        The same structure ``layer`` returns, with the original batch dims restored.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 leading dims are broadcast rather than sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out


class __lowerCAmelCase:
    """Probes successively larger chunk sizes to find the largest one that fits in memory,
    caching the result until the argument shapes change."""

    def __init__(self, max_chunk_size: int = 512) -> None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Binary-search the powers of two up to max_chunk_size for the largest runnable size."""
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            # A RuntimeError here is treated as "chunk too large" (e.g. OOM).
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        """Return True when the two (nested) shape caches are element-wise identical."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda kv: kv[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda kv: kv[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        """Return a chunk size for ``representative_fn(*args)``, re-tuning only when
        the cached argument shapes/values no longer match."""
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
629
0
def _A ( lowerCamelCase ): if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a__ : Union[str, Any] = sorted(string.lower() ) return len(lowerCAmelCase__ ) == len(set(lowerCAmelCase__ ) ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = input("""Enter a string """).strip() SCREAMING_SNAKE_CASE__ : int = is_isogram(input_str) print(f'{input_str} is {"an" if isogram else "not an"} isogram.')
721
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : int = """upernet""" def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]: """simple docstring""" super().__init__(**snake_case ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(snake_case , snake_case ): a__ : Optional[int] = backbone_config.get("model_type" ) a__ : str = CONFIG_MAPPING[backbone_model_type] a__ : str = config_class.from_dict(snake_case ) a__ : int = backbone_config a__ : Optional[Any] = hidden_size a__ : Optional[Any] = initializer_range a__ : Tuple = pool_scales a__ : Optional[Any] = use_auxiliary_head a__ : Optional[Any] = auxiliary_loss_weight a__ : Dict = auxiliary_in_channels a__ : Optional[int] = auxiliary_channels a__ : Any = auxiliary_num_convs a__ : Any = auxiliary_concat_input a__ : int = loss_ignore_index def _snake_case ( self ) -> str: """simple docstring""" a__ : Tuple = copy.deepcopy(self.__dict__ ) a__ : Optional[Any] = self.backbone_config.to_dict() a__ : List[Any] = self.__class__.model_type return output
629
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitImageProcessingTester(unittest.TestCase):
    """Holds the hyperparameters the Levit image-processor tests run with and
    builds the kwargs dict used to instantiate the processor."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class __lowerCAmelCase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Smoke tests for LevitImageProcessor over PIL, numpy and torch inputs."""

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """from_dict honors defaults and kwarg overrides for size/crop_size."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        """PIL inputs: single image and batch produce correctly shaped pixel_values."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        """Numpy inputs: single image and batch produce correctly shaped pixel_values."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        """Torch inputs: single image and batch produce correctly shaped pixel_values."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
700
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""): SCREAMING_SNAKE_CASE__ : int = { """linear""": PIL.Image.Resampling.BILINEAR, """bilinear""": PIL.Image.Resampling.BILINEAR, """bicubic""": PIL.Image.Resampling.BICUBIC, """lanczos""": PIL.Image.Resampling.LANCZOS, """nearest""": PIL.Image.Resampling.NEAREST, } else: SCREAMING_SNAKE_CASE__ : Dict = { """linear""": PIL.Image.LINEAR, """bilinear""": PIL.Image.BILINEAR, """bicubic""": PIL.Image.BICUBIC, """lanczos""": PIL.Image.LANCZOS, """nearest""": PIL.Image.NEAREST, } def _A ( lowerCamelCase ): a__ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 ) a__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() a__ : int = numpy_to_pil(lowerCamelCase ) return images def _A ( lowerCamelCase ): if images.ndim == 3: a__ : Tuple = images[None, ...] a__ : Dict = (images * 255).round().astype("uint8" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images a__ : str = [Image.fromarray(image.squeeze() , mode="L" ) for image in images] else: a__ : List[Any] = [Image.fromarray(lowerCamelCase ) for image in images] return pil_images
629
0
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch SCREAMING_SNAKE_CASE__ : str = """sshleifer/bart-tiny-random""" SCREAMING_SNAKE_CASE__ : Dict = """patrickvonplaten/t5-tiny-random""" @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> Dict: """simple docstring""" return AutoConfig.from_pretrained(snake_case ) def _snake_case ( self ) -> List[str]: """simple docstring""" a__ , *a__ : str = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _snake_case ( self ) -> Any: """simple docstring""" a__ , *a__ : Tuple = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _snake_case ( self ) -> List[str]: """simple docstring""" a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _snake_case ( self ) -> int: """simple docstring""" with self.assertRaises(snake_case ): create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
701
# Lint as: python3
"""Utilities for converting dataset names and building split filenames."""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    """Return the snake-case file prefix for a dataset *name* (must not be a path)."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """Return '<dataset_prefix>-<split>' after validating both components."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every shard file of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the concrete shard filenames for a dataset split.

    With *shard_lengths*, one '-SSSSS-of-NNNNN'-suffixed name per shard is
    produced; otherwise a single-file name is returned (always as a list).
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
629
0
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny TrOCR decoder configs/inputs and the past-key-values check."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for a tiny decoder."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that cached past_key_values reproduce the no-cache forward pass."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class __lowerCAmelCase(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the standalone TrOCR decoder and its LM head."""

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not relevant for this standalone-decoder model
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""", """SEWForCTC""", """SEWForSequenceClassification""", """SEWModel""", """SEWPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
629
0
from math import factorial def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): if successes > trials: raise ValueError("successes must be lower or equal to trials" ) if trials < 0 or successes < 0: raise ValueError("the function is defined for non-negative integers" ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError("the function is defined for non-negative integers" ) if not 0 < prob < 1: raise ValueError("prob has to be in range of 1 - 0" ) a__ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! a__ : Any = float(factorial(SCREAMING_SNAKE_CASE_ ) ) coefficient /= factorial(SCREAMING_SNAKE_CASE_ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print("""Probability of 2 successes out of 4 trails""") print("""with probability of 0.75 is:""", end=""" """) print(binomial_distribution(2, 4, 0.75))
703
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : int = { """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""], """tokenization_cpmant""": ["""CpmAntTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = [ """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""", """CpmAntForCausalLM""", """CpmAntModel""", """CpmAntPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
629
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # sentencepiece is optional; without it there is no slow tokenizer fallback.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class __lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer backed by HuggingFace *tokenizers*, based on a
    SentencePiece (Unigram) model. Mirrors the slow ``BigBirdTokenizer`` API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Plain strings become AddedToken instances so stripping behaviour is
        # explicit and consistent with the slow tokenizer.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow vocab requires the original spiece.model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: ``[CLS] X [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0 for the first sequence (and its special tokens),
        1 for the optional second sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece vocab file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
704
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


# Passage the question-answering tool is queried against in every test.
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class __lowerCAmelCase(unittest.TestCase, ToolTesterMixin):
    """Exercises the "text-question-answering" tool, locally and remotely."""

    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        # Positional text + question.
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        # Same query via keyword arguments.
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
629
0
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the config; unwraps the ``text_config`` when given a composite
        Pix2Struct checkpoint."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the config; unwraps ``vision_config`` when given a composite
        Pix2Struct checkpoint."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite configuration combining text and vision sub-configs."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Token ids are owned by the text (decoder) config.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Propagate the shared initializer range to both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs
    ):
        """Instantiate from separately-built text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
705
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class __lowerCAmelCase(TestCasePlus):
    """Checks TRANSFORMERS_OFFLINE behaviour by running snippets in a fresh
    python subprocess whose socket module is monkey-patched to fail."""

    @require_torch
    def test_offline_mode(self):
        # Snippets joined and executed via `python -c`.
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed: everything is cached, flaky network must not matter
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # task inference needs the network, so offline mode must raise cleanly
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
629
0
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __lowerCAmelCase(ChunkPipeline):
    """Automatic mask-generation pipeline (SAM-style): predicts binary masks
    for an image, processing point prompts in chunked batches."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        """Split caller kwargs into (preprocess, forward, postprocess) dicts."""
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generate masks for ``image`` (path, URL, or PIL image)."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1_500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    # Compute the image embeddings once; each chunk reuses them.
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        # Yield one chunk of point prompts at a time.
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        # Pass through any remaining per-chunk outputs untouched.
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
706
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Always importable: the configuration.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code requires torch; register it only when available.
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
629
0
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Return the simple interest accrued on *principal* at *daily_interest_rate*
    over *days_between_payments* days.

    Raises:
        ValueError: if any argument is outside its valid range.
    """
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return the compound interest earned: ``P * ((1 + r) ** n - 1)``.

    Raises:
        ValueError: if any argument is outside its valid range.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return interest compounded daily over *number_of_years* at the given
    annual percentage rate.

    Raises:
        ValueError: if any argument is outside its valid range.
    """
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    # Daily compounding: convert the APR to a daily rate over 365-day years.
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
707
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of *img* with its brightness shifted by *level*.

    Args:
        img: source PIL image.
        level: brightness offset in [-255.0, 255.0]; negative darkens,
            positive brightens.

    Raises:
        ValueError: if *level* is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # Shift each channel value by *level* around the mid-point 128.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
629
0
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for the DETA detection model (deformable-DETR family)."""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                # Rebuild the backbone config object from its serialized dict.
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
708
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn

# Official OpenAI Whisper checkpoint URLs. The path component immediately
# before the file name is the expected SHA256 checksum of the checkpoint.
# (Renamed from a placeholder: the code below refers to `_MODELS`.)
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    """Drop keys that have no counterpart in the HF model, in place."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        # pop with a default so absent keys are silently skipped
        state_dict.pop(k, None)


# OpenAI-state-dict substring -> HF-state-dict substring.
# (Renamed from a placeholder: `rename_keys` refers to `WHISPER_MAPPING`.)
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename OpenAI-style state-dict keys to HF names, in place.

    Returns `s_dict` for convenience.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        # Apply every matching substring replacement from WHISPER_MAPPING.
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares the embedding's weight data."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url, root=None):
    """Download `url` into `root`, verifying the SHA256 embedded in the URL.

    Returns the raw checkpoint bytes.
    BUG FIX: `root` was a required positional argument but the call site in
    `convert_openai_whisper_to_tfms` passes only the URL; it now defaults to
    ~/.cache/whisper.
    """
    from tqdm import tqdm  # local import: only needed when actually downloading

    if root is None:
        root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_shaaaa = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        # BUG FIX: `hashlib.shaaaa` does not exist; the URL carries a SHA256.
        if hashlib.sha256(model_bytes).hexdigest() == expected_shaaaa:
            return model_bytes
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_shaaaa:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model."
        )
    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint (model name or .pt path) to a HF model."""
    # Local import keeps the module importable without transformers installed.
    from transformers import WhisperConfig, WhisperForConditionalGeneration

    if ".pt" not in checkpoint_path:
        # `checkpoint_path` is a model name such as "tiny": download it first.
        # `_download` returns raw bytes, so deserialize them explicitly.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep a copy before renaming: used below when embeddings are not tied.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    # Both FFN dims are read off the first decoder layer after renaming.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # BUG FIX: was dimensions["n_text_state"] (hidden size), not a head
        # count; the decoder head count is "n_text_head".
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
629
0
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream

if TYPE_CHECKING:
    import sqlite3  # BUG FIX: was the non-existent module name "sqlitea"

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """Read a dataset from a SQL query or table via the packaged `Sql` builder.

    NOTE(review): both classes in this module previously shared one mangled
    name, so this first class was shadowed and unreachable; giving it a real
    name is backward compatible.
    """

    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        # BUG FIX: the original signature repeated one placeholder parameter
        # name (a SyntaxError) and forwarded an undefined name to super();
        # the real arguments are wired through here.
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Materialize the SQL query into a `Dataset` (single "train" split)."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class __lowerCAmelCase:
    """Write a `Dataset` to a SQL table via pandas `DataFrame.to_sql`, in batches."""

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name  # target SQL table name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        """Write the whole dataset; returns the number of rows written.

        BUG FIX: the three methods below were all named identically (so only
        the last survived) while the code calls `self._batch_sql` and
        `self._write`; methods are now named to match their call sites.
        """
        # `sql` and `con` are fixed by this writer; drop user-supplied duplicates.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch; `args` is (offset, index, to_sql_kwargs)."""
        offset, index, to_sql_kwargs = args
        # After the first batch, append to the table rather than replacing it.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        # `DataFrame.to_sql` may return None; fall back to the batch length.
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Write all batches, sequentially or with a multiprocessing pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch

        return written
709
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration class for an Informer time-series forecasting model.

    BUG FIXES: the base class was an undefined placeholder (must be the
    imported `PretrainedConfig`); `model_type`/`attribute_map` were bound to
    placeholder names that the config machinery never reads; the constructor
    repeated one parameter name for every argument (a SyntaxError).
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # Time-series-specific configuration
        self.prediction_length = prediction_length
        # Default the context window to the prediction window.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the (cardinality + 1), capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific attributes
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Total width of the per-step feature vector fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
629
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration class for an MRA (Multi-Resolution Attention) model.

    BUG FIXES: the base class was an undefined placeholder `_A` (must be the
    imported `PretrainedConfig`); `model_type` was bound to a placeholder name
    the Auto classes never read; the constructor reused a single parameter
    name for every argument, which is a SyntaxError — distinct names are
    restored to match the attribute assignments below.
    """

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # MRA-specific attributes
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
710
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


# BUG FIX: both constants were previously bound to the same placeholder name,
# so the BART checkpoint string was unreachable.
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class __lowerCAmelCase(unittest.TestCase):
    """Tests for `create_student_by_copying_alternating_layers`.

    BUG FIX: all methods were named identically with a non-`test_` prefix, so
    they collided and unittest never discovered them; they now carry distinct
    `test_*` names, and the property is named `teacher_config` to match the
    `self.teacher_config` reference below.
    """

    @cached_property
    def teacher_config(self):
        # Cached so repeated accesses do not re-fetch the config.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # Only checks that construction succeeds with an unspecified decoder depth.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        # With d=None the student keeps the teacher's full decoder depth.
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # NOTE(review): exception type assumed from the helper's contract
        # (e and d may not both be None) — confirm against make_student.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
629
0
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class __lowerCAmelCase(unittest.TestCase):
    """Unit tests for the Flax logits processors and warpers.

    BUG FIX: every method was named identically with a non-`test_` prefix
    (so they collided and were never run) and call arguments were undefined
    placeholders; distinct `test_*` names and real variables are restored.
    """

    def _get_uniform_logits(self, batch_size, length):
        # A uniform distribution over `length` tokens for each batch entry.
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        # BUG FIX: the shifted slice was assigned to a throwaway name instead
        # of back into the array.
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1,
        # but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id shold be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
711
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1""" SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2""" SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3""" SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4""" class __lowerCAmelCase ( _UpperCamelCase ): def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any: """simple docstring""" super()._init_() a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : int = StableDiffusionPipeline( vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _snake_case ( self ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )} def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a__ : List[Any] = self.unet.config.attention_head_dim // 2 
self.unet.set_attention_slice(snake_case ) def _snake_case ( self ) -> Tuple: """simple docstring""" self.enable_attention_slicing(snake_case ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]: """simple docstring""" return 
self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" a__ : Any = "cuda" if torch.cuda.is_available() else "cpu" self.to(snake_case ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 a__ : Any = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , 
num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.2 a__ : List[Any] = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.3 a__ : Optional[Any] = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.4 a__ : Dict = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
629
0
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS-84 ellipsoid parameters, in metres.  The obfuscated original bound all
# three to the same name (SCREAMING_SNAKE_CASE__) while the function body read
# AXIS_A / AXIS_B / RADIUS, which raised NameError; the real names are restored.
AXIS_A = 6378137.0  # semi-major axis
AXIS_B = 6356752.314245  # semi-minor axis
RADIUS = 6378137  # radius used for the final great-circle step

# Explicit export list so star-imports also expose the underscore-prefixed API.
__all__ = ["AXIS_A", "AXIS_B", "RADIUS", "_A"]


def _A(lat_a, lon_a, lat_b, lon_b):
    """Return the haversine distance in metres between two points.

    Each point is given as (latitude, longitude) in decimal degrees.
    Latitudes are first converted to WGS-84 reduced (parametric) latitudes so
    the spherical haversine formula better approximates the ellipsoid.

    Args:
        lat_a, lon_a: first point, degrees.
        lat_b, lon_b: second point, degrees.

    Returns:
        Great-circle distance in metres.
    """
    # Flattening of the ellipsoid, derived from its two axes.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced latitudes in radians.  The obfuscation collapsed phi_1/phi_2
    # (and lambda_1/lambda_2) into single names, making the deltas below
    # identically zero; distinct names restore the computation.
    phi_1 = atan((1 - flattening) * tan(radians(lat_a)))
    phi_2 = atan((1 - flattening) * tan(radians(lat_b)))
    lambda_1 = radians(lon_a)
    lambda_2 = radians(lon_b)
    # Haversine equation: half-angle sines of the latitude/longitude deltas.
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values.
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
712
# Standard acceleration due to gravity, in m/s^2.  The obfuscated original
# annotated this binding with Union[str, Any] (never imported -> NameError at
# import time) and renamed it away from `g`, which the function default below
# still referenced; both bindings are provided, un-annotated.
g = 9.80665
SCREAMING_SNAKE_CASE__ = g  # kept for compatibility with the obfuscated name

# Explicit export list so star-imports also expose the underscore-prefixed API.
__all__ = ["g", "SCREAMING_SNAKE_CASE__", "_A"]


def _A(fluid_density, volume, gravity=g):
    """Archimedes' principle: return the buoyant force on a submerged object.

    Args:
        fluid_density: density of the displaced fluid in kg/m^3, must be > 0.
        volume: displaced volume in m^3, must be >= 0.
        gravity: gravitational acceleration in m/s^2, must be > 0;
            defaults to standard Earth gravity.

    Returns:
        Buoyant force F = fluid_density * gravity * volume, in newtons.

    Raises:
        ValueError: if density or gravity is non-positive, or volume negative.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
629
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import table: submodule name -> public symbols it exposes.
# NOTE(review): this dict is later handed to _LazyModule under the name
# `_import_structure`, but the binding here was renamed by obfuscation to
# SCREAMING_SNAKE_CASE__ -- confirm both names refer to the same dict in the
# original file before relying on this module at runtime.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}

try:
    # Modeling classes require torch; skip their registration when absent.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Tuple = [
        """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTBigCodeForSequenceClassification""",
        """GPTBigCodeForTokenClassification""",
        """GPTBigCodeForCausalLM""",
        """GPTBigCodeModel""",
        """GPTBigCodePreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real, eager imports...
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    # ...while at runtime the module object is replaced by a lazy loader that
    # imports submodules only on first attribute access.
    import sys

    SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
713
from __future__ import annotations

from random import random


class __lowerCAmelCase:
    """Treap node: a binary search tree by ``value``, a heap by random ``prior``."""

    def __init__(self, value=None) -> None:
        self.value = value
        # Random priority; keeping the treap heap-ordered on it balances the
        # tree in expectation.
        self.prior = random()
        self.left = None  # subtree with smaller (or equal) values
        self.right = None  # subtree with larger values

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


# The obfuscated original collapsed every function name to `_A` and the class
# name away from `Node`, while the bodies still called split/merge/insert/
# erase/inorder/interact_treap/main and Node(...).  The real names are
# restored below; the alias keeps the obfuscated class name valid too.
Node = __lowerCAmelCase


def split(root, value):
    """Split ``root`` into two treaps: (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root belongs to the right part; recurse into its left subtree.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Root belongs to the left part; recurse into its right subtree.
            root.right, right = split(root.right, value)
            return root, right


def merge(left, right):
    """Merge two treaps where every value in ``left`` <= every value in ``right``."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # Lower priority becomes the root (min-heap on ``prior``).
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root, value):
    """Insert ``value`` into the treap rooted at ``root``; return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root, value):
    """Remove every node holding ``value``; return the new root."""
    # Carve out the slice of nodes equal to ``value`` and drop it.
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root):
    """Print the treap's values in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root, args):
    """Apply a whitespace-separated command string ('+N' insert, '-N' erase)."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main():
    """Interactive REPL over a single treap; 'q' quits."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
629
0
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
714
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ): _UpperCamelCase : Optional[int] = StableUnCLIPPipeline _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _UpperCamelCase : Any = False def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Any = 32 a__ : int = embedder_hidden_size # prior components torch.manual_seed(0 ) a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) a__ : Optional[Any] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) a__ : int = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , ) torch.manual_seed(0 ) a__ : str = DDPMScheduler( 
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case ) a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) a__ : Union[str, Any] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) a__ : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , ) torch.manual_seed(0 ) a__ : Tuple = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , ) torch.manual_seed(0 ) a__ : Optional[int] = AutoencoderKL() a__ : Any = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def _snake_case ( self , snake_case , 
snake_case=0 ) -> Dict: """simple docstring""" if str(snake_case ).startswith("mps" ): a__ : Union[str, Any] = torch.manual_seed(snake_case ) else: a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case ) a__ : Any = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Dict = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=snake_case ) def _snake_case ( self ) -> int: """simple docstring""" a__ : int = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=snake_case ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def _snake_case ( self ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 ) a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" ) a__ : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case , snake_case ) def _snake_case ( self ) -> Tuple: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a__ : str = 
StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) a__ : Union[str, Any] = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a__ : Union[str, Any] = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) a__ : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
629
0
import numpy as np


class __lowerCAmelCase:
    """Nearest-neighbour image rescaler.

    Maps each destination pixel back to its nearest source pixel by simple
    coordinate scaling; no interpolation.
    """

    def __init__(self, img, dst_width, dst_height) -> None:
        # Reject non-positive targets.  The obfuscated original only checked
        # `< 0`, contradicting its own "> 0" message and letting 0 through
        # into the ratio division below.
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # Source-to-destination scale factors along each axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # Output buffer, initialised to white.  (`np.uinta` in the obfuscated
        # original was a typo for `np.uint8`.)
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill the output buffer from the source image, pixel by pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to its nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to its nearest source row."""
        return int(self.ratio_y * y)


# The original main guard referenced `NearestNeighbour` while the class was
# renamed `__lowerCAmelCase`; this alias makes both names valid.
NearestNeighbour = __lowerCAmelCase

__all__ = ["NearestNeighbour", "__lowerCAmelCase"]


if __name__ == "__main__":
    # Imported here so the module stays importable without OpenCV installed.
    from cva import destroyAllWindows, imread, imshow, waitKey

    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : str = { """configuration_distilbert""": [ """DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DistilBertConfig""", """DistilBertOnnxConfig""", ], """tokenization_distilbert""": ["""DistilBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ """DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DistilBertForMaskedLM""", """DistilBertForMultipleChoice""", """DistilBertForQuestionAnswering""", """DistilBertForSequenceClassification""", """DistilBertForTokenClassification""", """DistilBertModel""", """DistilBertPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = [ """TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDistilBertForMaskedLM""", """TFDistilBertForMultipleChoice""", """TFDistilBertForQuestionAnswering""", """TFDistilBertForSequenceClassification""", """TFDistilBertForTokenClassification""", """TFDistilBertMainLayer""", """TFDistilBertModel""", """TFDistilBertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Any = [ """FlaxDistilBertForMaskedLM""", """FlaxDistilBertForMultipleChoice""", """FlaxDistilBertForQuestionAnswering""", """FlaxDistilBertForSequenceClassification""", """FlaxDistilBertForTokenClassification""", """FlaxDistilBertModel""", """FlaxDistilBertPreTrainedModel""", ] if 
TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
629
0
"""Self-training for text classification: iteratively fine-tune a model,
pseudo-label the unlabeled data with it, and fine-tune again on the
pseudo-labels.

NOTE(review): reconstructed from an obfuscated dump.  In the dump both
module-level functions were named ``_A`` (so the internal call to
``create_pseudo_labeled_data`` raised NameError) and boolean literals had
been replaced by placeholder names.  Behavior below follows the readable
parts of the dump; the exact literal values restored (``reverse=True``,
``index=False``, ``exist_ok=True``) are the only ones consistent with the
surrounding calls — TODO confirm against the upstream example script.
"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy

logger = logging.getLogger(__name__)

# File name of the serialized model weights inside a checkpoint directory.
MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the input data."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments controlling the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: str = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: str = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: int = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: float = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: bool = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: bool = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: bool = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: float = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: int = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Build the next iteration's pseudo-labeled training file.

    Joins the inference inputs with the model's predictions, optionally
    filters by confidence and/or validation performance, maps predicted
    label ids back to label names, shuffles, and writes
    ``train_pseudo.<ext>`` into ``next_data_dir``.
    """
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # Keep only the top fraction of rows proportional to validation quality.
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the full self-training loop.

    Each iteration: (stage 1) fine-tune on labeled data (iteration 0) or the
    previous iteration's pseudo-labels; (stage 2, optional) fine-tune again on
    the original labeled data; then pseudo-label the inference data for the
    next iteration.  Early stopping tracks the chosen eval metric.
    """
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging: only one process per machine logs at INFO level.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)

    # Merge all argument dataclasses plus caller overrides into one namespace.
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on.
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    eval_result = None

    # Show the progress bar.
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0, or pseudo-training
        # for iteration > 0.
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments passed by the caller.
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data.
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict for stage 2.
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration's eval results under a stable name.
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best.
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
716
"""Monte Carlo estimators for pi and for definite integrals.

NOTE(review): in the obfuscated dump all four functions were named ``_A``,
so later definitions shadowed earlier ones and the internal calls to
``area_under_curve_estimator`` raised NameError.  Distinct names restored.
"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling points in the unit square and printing results.

    The fraction of points landing inside the unit circle approximates pi/4.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Return a Monte Carlo estimate of the integral of *function_to_integrate*
    over [min_value, max_value] (mean sample value times interval width)."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator on y = x and print estimate vs. exact area."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    # Exact area under y = x over [a, b] is (b^2 - a^2) / 2.
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under y = sqrt(4 - x^2) on [0, 2] and print it."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
629
0
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __lowerCAmelCase ( __lowerCAmelCase ): def __init__( self , snake_case , snake_case = None , snake_case = None , snake_case = True , snake_case = None , snake_case = False , snake_case = None , snake_case = True , snake_case = "arrow" , **snake_case , ) -> Dict: """simple docstring""" super().__init__( split=lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , streaming=lowerCAmelCase_ , **lowerCAmelCase_ , ) a__ : Optional[Any] = load_from_cache_file a__ : Optional[int] = file_format a__ : str = Spark( df=lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , working_dir=lowerCAmelCase_ , **lowerCAmelCase_ , ) def _snake_case ( self ) -> str: """simple docstring""" if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) a__ : int = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=lowerCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
717
"""Convert an original EfficientFormer checkpoint to the Hugging Face format.

NOTE(review): reconstructed from an obfuscated dump in which functions shared
the name ``_A`` and parameter lists repeated one name (a SyntaxError).  The
nesting of the ``norm1/norm2/fc1/fc2`` renames inside the meta3D branch is
restored from the visible replace chain — TODO confirm against upstream.
"""
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    """Map one original state-dict key to its Hugging Face equivalent."""
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Stage/block index may be one or two digits wide.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace(
                "network", match[0] + ".meta4D_layers.blocks." + match[2:-1]
            )
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of *checkpoint* in place and return it."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    """Download the standard COCO test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub
):
    """Load, convert, sanity-check against known logits, save and optionally push."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_metaad_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline — must match the processor exactly.
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfuly saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
629
0
import requests SCREAMING_SNAKE_CASE__ : Union[str, Any] = '' # <-- Put your OpenWeatherMap appid here! SCREAMING_SNAKE_CASE__ : int = 'https://api.openweathermap.org/data/2.5/' def _A ( lowerCamelCase = "Chicago" , lowerCamelCase = APPID ): return requests.get(URL_BASE + "weather" , params=locals() ).json() def _A ( lowerCamelCase = "Kolkata, India" , lowerCamelCase = APPID ): return requests.get(URL_BASE + "forecast" , params=locals() ).json() def _A ( lowerCamelCase = 55.68 , lowerCamelCase = 12.57 , lowerCamelCase = APPID ): return requests.get(URL_BASE + "onecall" , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: SCREAMING_SNAKE_CASE__ : str = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
718
"""Fast (Rust-backed) tokenizer for LXMERT.

NOTE(review): reconstructed from an obfuscated dump in which ``__init__``
and the helper methods repeated a single parameter name (a SyntaxError) and
the normalizer-state updates were collapsed into meaningless assignments.
Names restored to the standard WordPiece fast-tokenizer pattern implied by
the forwarded keyword arguments visible in the dump.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its stored options disagree with
        # the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add [CLS]/[SEP] around one sequence, or join a pair with [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials),
        1 for the optional second sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
629
0
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ): def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : Dict = tempfile.mkdtemp() a__ : Optional[Any] = 8 # DPR tok a__ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a__ : Any = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) a__ : Any = os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok a__ : List[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a__ : Tuple = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) a__ : Any = ["#version: 0.2", "\u0120 l", 
"\u0120l o", "\u0120lo w", "e r", ""] a__ : Tuple = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) a__ : List[str] = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) a__ : str = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def _snake_case ( self ) -> List[Any]: """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def _snake_case ( self ) -> Tuple: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Dict = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Union[str, Any] = self.get_dummy_dataset() a__ : Optional[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: a__ : Tuple = dataset a__ : Optional[int] = RagRetriever( snake_case__ , 
question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _snake_case ( self , snake_case ) -> Union[str, Any]: """simple docstring""" a__ : Dict = self.get_dummy_dataset() a__ : Tuple = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: a__ : Tuple = os.path.join(self.tmpdirname , "dataset" ) a__ : Optional[Any] = os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset a__ : List[Any] = RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: a__ : Optional[int] = RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , ) return retriever def _snake_case ( self ) -> Any: """simple docstring""" a__ : List[str] = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) a__ : Optional[int] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) a__ : int = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) a__ : str = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case__ , open(snake_case__ , "wb" ) ) a__ : 
List[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) a__ : Optional[Any] = RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Dict = 1 a__ : Optional[Any] = self.get_dummy_canonical_hf_index_retriever() a__ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Optional[int] = retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Optional[Any] = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: a__ : Tuple = self.get_dummy_dataset() retriever.save_pretrained(snake_case__ ) a__ : int = RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) a__ : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Tuple = retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : List[Any] = 1 a__ 
: Any = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) a__ : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Any = retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) a__ : int = RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) a__ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : str = retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def _snake_case ( self ) -> int: """simple docstring""" a__ : Any = 1 a__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) a__ : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Optional[int] = retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) 
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) a__ : Optional[Any] = RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) a__ : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Union[str, Any] = retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def _snake_case ( self ) -> Any: """simple docstring""" a__ : Dict = 1 a__ : int = self.get_dummy_legacy_index_retriever() a__ : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Optional[Any] = retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Optional[int] = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) a__ : List[Any] = RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) a__ : str = 
np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Tuple = retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _snake_case ( self ) -> Any: """simple docstring""" import torch a__ : str = 1 a__ : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever() a__ : str = [[5, 7], [10, 11]] a__ : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : Dict = retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) a__ : Optional[int] = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , np.ndarray ) a__ : Any = retriever( snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , ) a__ : Tuple = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , torch.Tensor ) self.assertIsInstance(snake_case__ , torch.Tensor ) self.assertIsInstance(snake_case__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : List[str] = self.get_dpr_ctx_encoder_tokenizer() a__ : Dict = 1 a__ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) retriever.set_ctx_encoder_tokenizer(snake_case__ ) a__ : List[str] = [[5, 7], [10, 11]] a__ : Any = np.array( [np.ones(self.retrieval_vector_size ), 
-np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) a__ : List[Any] = retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) self.assertEqual( len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
719
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    """Configuration for a MobileNetV2 model.

    Stores the architecture hyper-parameters; all arguments simply become
    attributes of the config object.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """Create the config.

        Raises:
            ValueError: if ``depth_multiplier`` is not strictly positive,
                since every layer width is scaled by it.
        """
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Single image input with a dynamic batch axis."""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Task-dependent outputs: logits for classification, hidden states otherwise."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
629
0
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( __lowerCamelCase ,unittest.TestCase ): _UpperCamelCase : Any = LEDTokenizer _UpperCamelCase : int = LEDTokenizerFast _UpperCamelCase : Any = True def _snake_case ( self ) -> Any: """simple docstring""" super().setUp() a__ : Any = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a__ : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) a__ : Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a__ : List[str] = {"unk_token": "<unk>"} a__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self , **snake_case ) -> Any: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self , **snake_case ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self , snake_case ) -> Optional[int]: """simple docstring""" return "lower newer", "lower newer" @cached_property def _snake_case ( self ) -> int: """simple docstring""" return 
LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def _snake_case ( self ) -> str: """simple docstring""" return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : List[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] a__ : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: a__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ , max_length=len(SCREAMING_SNAKE_CASE_ ) , padding=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) a__ : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @require_torch def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: a__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) self.assertIn("input_ids" , SCREAMING_SNAKE_CASE_ ) self.assertIn("attention_mask" , SCREAMING_SNAKE_CASE_ ) self.assertNotIn("labels" , SCREAMING_SNAKE_CASE_ ) self.assertNotIn("decoder_attention_mask" , SCREAMING_SNAKE_CASE_ ) @require_torch def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Any = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: a__ : Union[str, Any] = tokenizer(text_target=SCREAMING_SNAKE_CASE_ , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def _snake_case ( self ) -> str: 
"""simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: a__ : str = tokenizer( ["I am a small frog" * 1_024, "I am a small frog"] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def _snake_case ( self ) -> Any: """simple docstring""" a__ : int = ["A long paragraph for summarization."] a__ : Optional[Any] = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: a__ : Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) a__ : str = tokenizer(text_target=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) a__ : Optional[Any] = inputs["input_ids"] a__ : int = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def _snake_case ( self ) -> List[Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: a__ : Any = ["Summary of the text.", "Another summary."] a__ : str = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] a__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) a__ : str = [[0] * len(SCREAMING_SNAKE_CASE_ ) for x in encoded_output["input_ids"]] a__ : Union[str, Any] = tokenizer.pad(SCREAMING_SNAKE_CASE_ ) self.assertSequenceEqual(outputs["global_attention_mask"] , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self ) -> Dict: """simple docstring""" pass def _snake_case ( self ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" 
): a__ : int = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) a__ : Dict = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) a__ : Union[str, Any] = "A, <mask> AllenNLP sentence." a__ : str = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) a__ : int = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) a__ : int = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) a__ : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
720
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of every tensor leaf in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for shape ``dims``."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return a minimal set of slice tuples covering [start, end] (inclusive).

    ``start``/``end`` are multi-dimensional indices into a tensor of shape
    ``dims``; the returned slices, applied in order and concatenated, select
    exactly the flat range between the two indices.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice out the flat batch range [flat_start, flat_end) from ``t``
    without materializing a flattened copy of the whole tensor."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Apply ``layer`` over ``inputs`` in chunks along the flattened batch dims.

    Args:
        layer: callable invoked as ``layer(**chunk)``.
        inputs: (possibly nested) dict of tensor arguments; the first
            ``no_batch_dims`` dimensions of each tensor are treated as batch
            dimensions and may be broadcastable rather than equal.
        chunk_size: number of flat-batch elements per call.
        no_batch_dims: how many leading dimensions form the batch.
        low_mem: avoid materializing flattened/expanded inputs up front.
        _out: optional pre-allocated output tree to write into.
        _add_into_out: accumulate (+=) into the output instead of assigning.

    Returns:
        The layer output tree with the original batch dimensions restored.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 (broadcast) batch dims are passed through unsliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    """Binary-searches the largest chunk size (up to a cap) that runs
    without a RuntimeError (typically OOM), caching the result per
    argument-shape signature."""

    def __init__(
        self,
        # Heuristic cap on the search space.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Probe power-of-two candidates and return the largest viable one."""
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        """Recursively compare two cached argument signatures for equality."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        """Return a tuned chunk size, re-tuning only when the argument
        shapes/values differ from the cached signature."""
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
629
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class __lowerCAmelCase ( _snake_case ): _UpperCamelCase : str = """table-transformer""" _UpperCamelCase : Optional[Any] = ["""past_key_values"""] _UpperCamelCase : Dict = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , snake_case=True , snake_case=None , snake_case=3 , snake_case=100 , snake_case=6 , snake_case=2_048 , snake_case=8 , snake_case=6 , snake_case=2_048 , snake_case=8 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case="relu" , snake_case=256 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1.0 , snake_case=False , snake_case="sine" , snake_case="resnet50" , snake_case=True , snake_case=False , snake_case=1 , snake_case=5 , snake_case=2 , snake_case=1 , snake_case=1 , snake_case=5 , snake_case=2 , snake_case=0.1 , **snake_case , ) -> Tuple: """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) a__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(snake_case_ , snake_case_ ): a__ : List[str] = backbone_config.get("model_type" ) a__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] a__ : List[Any] = config_class.from_dict(snake_case_ ) # set timm attributes to None a__ : Optional[int] = None, None, None a__ : int = use_timm_backbone a__ : Tuple = backbone_config a__ : int = num_channels a__ : Any = num_queries a__ : Optional[Any] = d_model a__ : List[Any] = encoder_ffn_dim a__ : List[str] = encoder_layers a__ : List[str] = encoder_attention_heads a__ : Optional[int] = decoder_ffn_dim a__ : str = decoder_layers a__ : int = decoder_attention_heads a__ : Optional[Any] = dropout a__ : List[str] = attention_dropout a__ : Dict = activation_dropout a__ : int = activation_function a__ : Dict = init_std a__ : Optional[int] = init_xavier_std a__ : List[str] = encoder_layerdrop a__ : Optional[Any] = decoder_layerdrop a__ : Union[str, Any] = encoder_layers a__ : int = auxiliary_loss a__ : Any = position_embedding_type a__ : List[Any] = backbone a__ : Tuple = use_pretrained_backbone a__ : Any = dilation # Hungarian matcher a__ : List[str] = class_cost a__ : Optional[int] = bbox_cost a__ : str = giou_cost # Loss coefficients a__ : Dict = mask_loss_coefficient a__ : Any = dice_loss_coefficient a__ : Optional[Any] = bbox_loss_coefficient a__ : Tuple = giou_loss_coefficient a__ : Union[str, Any] = eos_coefficient super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ ) @property def _snake_case ( self ) -> Dict: """simple docstring""" return self.encoder_attention_heads @property def _snake_case ( self ) -> List[str]: """simple docstring""" return self.d_model class __lowerCAmelCase ( _snake_case ): _UpperCamelCase : List[str] = version.parse("""1.11""" ) @property def _snake_case ( self ) -> List[str]: """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), 
("pixel_mask", {0: "batch"}), ] ) @property def _snake_case ( self ) -> List[str]: """simple docstring""" return 1E-5 @property def _snake_case ( self ) -> int: """simple docstring""" return 12
721
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : int = """upernet""" def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]: """simple docstring""" super().__init__(**snake_case ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(snake_case , snake_case ): a__ : Optional[int] = backbone_config.get("model_type" ) a__ : str = CONFIG_MAPPING[backbone_model_type] a__ : str = config_class.from_dict(snake_case ) a__ : int = backbone_config a__ : Optional[Any] = hidden_size a__ : Optional[Any] = initializer_range a__ : Tuple = pool_scales a__ : Optional[Any] = use_auxiliary_head a__ : Optional[Any] = auxiliary_loss_weight a__ : Dict = auxiliary_in_channels a__ : Optional[int] = auxiliary_channels a__ : Any = auxiliary_num_convs a__ : Any = auxiliary_concat_input a__ : int = loss_ignore_index def _snake_case ( self ) -> str: """simple docstring""" a__ : Tuple = copy.deepcopy(self.__dict__ ) a__ : Optional[Any] = self.backbone_config.to_dict() a__ : List[Any] = self.__class__.model_type return output
629
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) def _A ( lowerCamelCase ): # initialize config if "resnet-50" in model_name: a__ : Any = ResNetConfig.from_pretrained("microsoft/resnet-50" ) elif "resnet-101" in model_name: a__ : Any = ResNetConfig.from_pretrained("microsoft/resnet-101" ) else: raise ValueError("Model name should include either resnet50 or resnet101" ) a__ : Union[str, Any] = DetrConfig(use_timm_backbone=lowerCamelCase , backbone_config=lowerCamelCase ) # set label attributes a__ : List[str] = '''panoptic''' in model_name if is_panoptic: a__ : Any = 250 else: a__ : Any = 91 a__ : Tuple = '''huggingface/label-files''' a__ : Any = '''coco-detection-id2label.json''' a__ : Optional[Any] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) ) a__ : Optional[int] = {int(lowerCamelCase ): v for k, v in idalabel.items()} a__ : Any = idalabel a__ : Tuple = {v: k for k, v in idalabel.items()} return config, is_panoptic def _A ( lowerCamelCase ): # here we list all keys to be renamed (original name on the left, our name on the right) a__ : Dict = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") ) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") ) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") ) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") ) 
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""", 
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") ) # decoder layers: 2 times output projection, 2 feedforward neural networks 
and 3 layernorms rename_keys.append( ( F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), 
("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : List[str] = state_dict.pop(lowerCamelCase ) a__ : Tuple = val def _A ( lowerCamelCase , lowerCamelCase=False ): a__ : Union[str, Any] = '''''' if is_panoptic: a__ : str = '''detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a__ : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) a__ : Tuple = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict a__ : Dict = in_proj_weight[:256, :] a__ : int = in_proj_bias[:256] a__ : Optional[int] = in_proj_weight[256:512, :] a__ : Dict = in_proj_bias[256:512] a__ : Union[str, Any] = in_proj_weight[-256:, :] a__ : Optional[Any] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention a__ : List[str] = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) a__ : 
Optional[Any] = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict a__ : Any = in_proj_weight[:256, :] a__ : Dict = in_proj_bias[:256] a__ : Union[str, Any] = in_proj_weight[256:512, :] a__ : Any = in_proj_bias[256:512] a__ : Dict = in_proj_weight[-256:, :] a__ : List[Any] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention a__ : Dict = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) a__ : int = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict a__ : str = in_proj_weight_cross_attn[:256, :] a__ : Tuple = in_proj_bias_cross_attn[:256] a__ : Tuple = in_proj_weight_cross_attn[256:512, :] a__ : Tuple = in_proj_bias_cross_attn[256:512] a__ : Tuple = in_proj_weight_cross_attn[-256:, :] a__ : List[str] = in_proj_bias_cross_attn[-256:] def _A ( ): a__ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' a__ : List[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _A ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ): a__ : Tuple = get_detr_config(lowerCamelCase ) # load original model from torch hub a__ : List[str] = { '''detr-resnet-50''': '''detr_resnet50''', '''detr-resnet-101''': '''detr_resnet101''', } logger.info(F"""Converting model {model_name}...""" ) a__ : Any = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=lowerCamelCase ).eval() a__ : List[Any] = detr.state_dict() # rename keys for src, dest in create_rename_keys(lowerCamelCase ): if is_panoptic: a__ : Optional[int] = '''detr.''' + src rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # query, key and value matrices need special treatment 
read_in_q_k_v(lowerCamelCase , is_panoptic=lowerCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a__ : Optional[Any] = '''detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): a__ : Dict = state_dict.pop(lowerCamelCase ) a__ : Dict = val elif "class_labels_classifier" in key or "bbox_predictor" in key: a__ : Optional[int] = state_dict.pop(lowerCamelCase ) a__ : str = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: a__ : Dict = state_dict.pop(lowerCamelCase ) a__ : Union[str, Any] = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): a__ : List[str] = state_dict.pop(lowerCamelCase ) a__ : Optional[int] = val # finally, create HuggingFace model and load state dict a__ : str = DetrForSegmentation(lowerCamelCase ) if is_panoptic else DetrForObjectDetection(lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() # verify our conversion on an image a__ : int = '''coco_panoptic''' if is_panoptic else '''coco_detection''' a__ : Any = DetrImageProcessor(format=lowerCamelCase ) a__ : List[Any] = processor(images=prepare_img() , return_tensors="pt" ) a__ : Optional[int] = encoding['''pixel_values'''] a__ : Optional[int] = detr(lowerCamelCase ) a__ : int = model(lowerCamelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub..." ) model.push_to_hub(F"""nielsr/{model_name}""" ) processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""detr-resnet-50""", type=str, choices=["""detr-resnet-50""", """detr-resnet-101"""], help="""Name of the DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""") SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
700
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""): SCREAMING_SNAKE_CASE__ : int = { """linear""": PIL.Image.Resampling.BILINEAR, """bilinear""": PIL.Image.Resampling.BILINEAR, """bicubic""": PIL.Image.Resampling.BICUBIC, """lanczos""": PIL.Image.Resampling.LANCZOS, """nearest""": PIL.Image.Resampling.NEAREST, } else: SCREAMING_SNAKE_CASE__ : Dict = { """linear""": PIL.Image.LINEAR, """bilinear""": PIL.Image.BILINEAR, """bicubic""": PIL.Image.BICUBIC, """lanczos""": PIL.Image.LANCZOS, """nearest""": PIL.Image.NEAREST, } def _A ( lowerCamelCase ): a__ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 ) a__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() a__ : int = numpy_to_pil(lowerCamelCase ) return images def _A ( lowerCamelCase ): if images.ndim == 3: a__ : Tuple = images[None, ...] a__ : Dict = (images * 255).round().astype("uint8" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images a__ : str = [Image.fromarray(image.squeeze() , mode="L" ) for image in images] else: a__ : List[Any] = [Image.fromarray(lowerCamelCase ) for image in images] return pil_images
629
0
import requests SCREAMING_SNAKE_CASE__ : int = """""" # <-- Put your OpenWeatherMap appid here! SCREAMING_SNAKE_CASE__ : Optional[Any] = """https://api.openweathermap.org/data/2.5/""" def _A ( lowerCamelCase = "Chicago" , lowerCamelCase = APPID ): return requests.get(URL_BASE + "weather" , params=locals() ).json() def _A ( lowerCamelCase = "Kolkata, India" , lowerCamelCase = APPID ): return requests.get(URL_BASE + "forecast" , params=locals() ).json() def _A ( lowerCamelCase = 55.68 , lowerCamelCase = 12.57 , lowerCamelCase = APPID ): return requests.get(URL_BASE + "onecall" , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: SCREAMING_SNAKE_CASE__ : Dict = input("""Enter a location:""").strip() if location: pprint(current_weather(location)) else: break
701
# Lint as: python3 import itertools import os import re SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""") SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""") SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""") SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""") SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*""" def _A ( lowerCamelCase ): a__ : List[str] = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase ) a__ : Dict = _lowercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase ) return name.lower() def _A ( lowerCamelCase ): a__ : Tuple = _single_underscore_re.split(lowerCamelCase ) a__ : Any = [_multiple_underscores_re.split(lowerCamelCase ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase ) if n != "" ) def _A ( lowerCamelCase ): if os.path.basename(lowerCamelCase ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) return camelcase_to_snakecase(lowerCamelCase ) def _A ( lowerCamelCase , lowerCamelCase ): if os.path.basename(lowerCamelCase ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) if not re.match(_split_re , lowerCamelCase ): raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" ) return F"""{filename_prefix_for_name(lowerCamelCase )}-{split}""" def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ): a__ : Union[str, Any] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase ) if filetype_suffix: prefix += F""".{filetype_suffix}""" a__ : Any = os.path.join(lowerCamelCase , lowerCamelCase ) return F"""{filepath}*""" def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ): a__ : List[str] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase ) a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase ) if 
shard_lengths: a__ : List[str] = len(lowerCamelCase ) a__ : str = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCamelCase )] if filetype_suffix: a__ : Optional[Any] = [filename + F""".{filetype_suffix}""" for filename in filenames] return filenames else: a__ : Optional[int] = prefix if filetype_suffix: filename += F""".{filetype_suffix}""" return [filename]
629
0
SCREAMING_SNAKE_CASE__ : Tuple = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' SCREAMING_SNAKE_CASE__ : Union[str, Any] = [{'type': 'code', 'content': INSTALL_CONTENT}] SCREAMING_SNAKE_CASE__ : List[Any] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
702
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""", """SEWForCTC""", """SEWForSequenceClassification""", """SEWModel""", """SEWPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
629
0
import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _A ( ): a__ : int = argparse.ArgumentParser() parser.add_argument("--model_ckpt" , type=UpperCamelCase__ , default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs" , type=UpperCamelCase__ , default=5 ) parser.add_argument("--batch_size" , type=UpperCamelCase__ , default=6 ) parser.add_argument("--gradient_accumulation_steps" , type=UpperCamelCase__ , default=1 ) parser.add_argument("--freeze" , type=UpperCamelCase__ , default=UpperCamelCase__ ) parser.add_argument("--learning_rate" , type=UpperCamelCase__ , default=5E-4 ) parser.add_argument("--seed" , type=UpperCamelCase__ , default=0 ) parser.add_argument("--lr_scheduler_type" , type=UpperCamelCase__ , default="cosine" ) parser.add_argument("--num_warmup_steps" , type=UpperCamelCase__ , default=10 ) parser.add_argument("--weight_decay" , type=UpperCamelCase__ , default=0.01 ) parser.add_argument("--output_dir" , type=UpperCamelCase__ , default="./results" ) return parser.parse_args() SCREAMING_SNAKE_CASE__ : Dict = load("""accuracy""") def _A ( lowerCamelCase ): a__ , a__ : Tuple = eval_pred a__ : int = np.argmax(UpperCamelCase__ , axis=1 ) return metric.compute(predictions=UpperCamelCase__ , references=UpperCamelCase__ ) class __lowerCAmelCase ( UpperCAmelCase_ ): def __init__( self , snake_case ) -> List[str]: """simple docstring""" super().__init__() a__ : Optional[int] = trainer def _snake_case ( self , snake_case , snake_case , snake_case , **snake_case ) -> Tuple: """simple docstring""" if control.should_evaluate: a__ : Tuple = deepcopy(_lowercase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) return control_copy def _A ( ): a__ : int 
= get_args() set_seed(args.seed ) a__ : Tuple = load_dataset("codeparrot/codecomplex" , split="train" ) a__ : int = dataset.train_test_split(test_size=0.2 ) a__ : List[str] = train_test["test"].train_test_split(test_size=0.5 ) a__ : Optional[Any] = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) a__ : str = AutoTokenizer.from_pretrained(args.model_ckpt ) a__ : str = tokenizer.eos_token a__ : int = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) a__ : Tuple = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): a__ : int = False a__ : Any = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(lowerCamelCase ): a__ : Dict = tokenizer(example["src"] , truncation=UpperCamelCase__ , max_length=1024 ) a__ : List[str] = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } a__ : Optional[Any] = train_test_validation.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=train_test_validation["train"].column_names , ) a__ : str = DataCollatorWithPadding(tokenizer=UpperCamelCase__ ) a__ : List[str] = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , ) a__ : Dict = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , 
tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) print("Training..." ) trainer.add_callback(CustomCallback(UpperCamelCase__ ) ) trainer.train() if __name__ == "__main__": main()
703
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : int = { """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""], """tokenization_cpmant""": ["""CpmAntTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = [ """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""", """CpmAntForCausalLM""", """CpmAntModel""", """CpmAntPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
629
0
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : List[str] = """▁""" SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class __lowerCAmelCase ( snake_case__ ,unittest.TestCase ): _UpperCamelCase : Optional[int] = BertGenerationTokenizer _UpperCamelCase : Union[str, Any] = False _UpperCamelCase : Any = True def _snake_case ( self ) -> Dict: """simple docstring""" super().setUp() a__ : List[Any] = BertGenerationTokenizer(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self ) -> int: """simple docstring""" a__ : int = "<s>" a__ : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def _snake_case ( self ) -> Dict: """simple docstring""" a__ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(lowercase_ ) , 1_002 ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Optional[int] = BertGenerationTokenizer(lowercase_ , keep_accents=lowercase_ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , ) a__ : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : Dict = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) a__ : Any = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _snake_case ( self ) -> Tuple: """simple docstring""" return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) @slow def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : List[str] = "Hello World!" a__ : Optional[int] = [18_536, 2_260, 101] self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @slow def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Optional[Any] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) a__ : List[Any] = [ 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, ] self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @require_torch @slow def _snake_case ( self ) -> Optional[int]: """simple docstring""" import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence a__ : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] a__ : Optional[Any] = " ".join(lowercase_ ) a__ : Optional[Any] = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" , return_token_type_ids=lowercase_ ) a__ : Dict = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowercase_ ) a__ : List[str] = BertGenerationConfig() a__ : str = BertGenerationEncoder(lowercase_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase_ ) model(**lowercase_ ) @slow def _snake_case ( self ) -> int: """simple docstring""" a__ : List[Any] = {"input_ids": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 
37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
704
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin SCREAMING_SNAKE_CASE__ : Dict = """ Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] """ class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ): def _snake_case ( self ) -> str: """simple docstring""" a__ : Optional[int] = load_tool("text-question-answering" ) self.tool.setup() a__ : Dict = load_tool("text-question-answering" , remote=snake_case ) def _snake_case ( self ) -> Dict: """simple docstring""" a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case , "launched the BigScience Research Workshop" ) def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case , "launched the BigScience Research Workshop" ) def _snake_case ( self ) -> Any: """simple docstring""" a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case , "launched the BigScience Research Workshop" ) def _snake_case ( self ) -> int: """simple docstring""" a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" 
) self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
629
0
def _A ( lowerCamelCase ): a__ : Optional[int] = [[0 for _ in range(lowerCamelCase )] for _ in range(m + 1 )] for i in range(m + 1 ): a__ : Tuple = 1 for n in range(m + 1 ): for k in range(1 , lowerCamelCase ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""Enter a number: """).strip()) print(partition(n)) except ValueError: print("""Please enter a number.""") else: try: SCREAMING_SNAKE_CASE__ : Any = int(sys.argv[1]) print(partition(n)) except ValueError: print("""Please pass a number.""")
705
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowerCAmelCase ( _UpperCamelCase ): @require_torch def _snake_case ( self ) -> str: """simple docstring""" a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a__ : Tuple = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(snake_case ) BertModel.from_pretrained(snake_case ) BertTokenizer.from_pretrained(snake_case ) pipeline(task="fill-mask" , model=snake_case ) # baseline - just load from_pretrained with normal network a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a__ : Dict = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Tuple = "1" a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n 
" a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a__ : List[Any] = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(snake_case ) BertModel.from_pretrained(snake_case ) BertTokenizer.from_pretrained(snake_case ) pipeline(task="fill-mask" , model=snake_case ) # baseline - just load from_pretrained with normal network a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a__ : Any = self.get_env() a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n " a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n " a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " # baseline - just load from_pretrained with normal network a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )] # should succeed a__ : str = self.get_env() a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # next emulate no network a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. 
# env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Union[str, Any] = "1" a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Optional[Any] = "\nfrom transformers import pipeline\n " a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n " a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " a__ : List[str] = self.get_env() a__ : Union[str, Any] = "1" a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )] a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , ) @require_torch def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Any = "\nfrom transformers import AutoModel\n " a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n " # baseline - just load from_pretrained with normal network a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )] # should succeed a__ : Optional[Any] = self.get_env() a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # should 
succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Dict = "1" a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() )
629
0
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ = '' SCREAMING_SNAKE_CASE__ = '' SCREAMING_SNAKE_CASE__ = '' SCREAMING_SNAKE_CASE__ = 1 # (0 is vertical, 1 is horizontal) def _A ( ): a__ : Tuple = get_dataset(_lowerCamelCase , _lowerCamelCase ) print("Processing..." ) a__ : Union[str, Any] = update_image_and_anno(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) for index, image in enumerate(_lowerCamelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' a__ : Optional[Any] = random_chars(32 ) a__ : List[str] = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0] a__ : Tuple = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(F"""/{file_root}.jpg""" , _lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Success {index+1}/{len(_lowerCamelCase )} with {file_name}""" ) a__ : List[Any] = [] for anno in new_annos[index]: a__ : Union[str, Any] = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(_lowerCamelCase ) with open(F"""/{file_root}.txt""" , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def _A ( lowerCamelCase , lowerCamelCase ): a__ : Any = [] a__ : Optional[Any] = [] for label_file in glob.glob(os.path.join(_lowerCamelCase , "*.txt" ) ): a__ : List[str] = label_file.split(os.sep )[-1].rsplit("." 
, 1 )[0] with open(_lowerCamelCase ) as in_file: a__ : Optional[int] = in_file.readlines() a__ : Optional[Any] = os.path.join(_lowerCamelCase , F"""{label_name}.jpg""" ) a__ : Any = [] for obj_list in obj_lists: a__ : Optional[Any] = obj_list.rstrip("\n" ).split(" " ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(_lowerCamelCase ) labels.append(_lowerCamelCase ) return img_paths, labels def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 ): a__ : List[Any] = [] a__ : Any = [] a__ : List[Any] = [] for idx in range(len(_lowerCamelCase ) ): a__ : Union[str, Any] = [] a__ : Optional[int] = img_list[idx] path_list.append(_lowerCamelCase ) a__ : Tuple = anno_list[idx] a__ : Union[str, Any] = cva.imread(_lowerCamelCase ) if flip_type == 1: a__ : Union[str, Any] = cva.flip(_lowerCamelCase , _lowerCamelCase ) for bbox in img_annos: a__ : List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: a__ : Optional[int] = cva.flip(_lowerCamelCase , _lowerCamelCase ) for bbox in img_annos: a__ : Any = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(_lowerCamelCase ) new_imgs_list.append(_lowerCamelCase ) return new_imgs_list, new_annos_lists, path_list def _A ( lowerCamelCase = 32 ): assert number_char > 1, "The number of character should greater than 1" a__ : Optional[Any] = ascii_lowercase + digits return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) ) if __name__ == "__main__": main() print("""DONE ✅""")
706
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""", """VanForImageClassification""", """VanModel""", """VanPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
629
0
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCAmelCase : def __init__( self , snake_case , snake_case=13 , snake_case=3 , snake_case=True , snake_case=True , snake_case=0.1 , snake_case=0.1 , snake_case=224 , snake_case=1_000 , snake_case=[3, 3, 6, 4] , snake_case=[48, 56, 112, 220] , ) -> Union[str, Any]: """simple docstring""" a__ : int = parent a__ : Tuple = batch_size a__ : Optional[int] = num_channels a__ : Any = is_training a__ : Any = use_labels a__ : Dict = hidden_dropout_prob a__ : str = attention_probs_dropout_prob a__ : str = num_labels a__ : Optional[int] = image_size a__ : Optional[Any] = layer_depths a__ : Dict = embed_dims def _snake_case ( self ) -> int: """simple docstring""" a__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Dict = None if self.use_labels: a__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) a__ : Optional[Any] = self.get_config() return config, pixel_values, labels def _snake_case ( self ) -> Any: """simple docstring""" return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , 
down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__UpperCamelCase , layer_scale_init_value=1E-5 , ) def _snake_case ( self , snake_case , snake_case , snake_case ) -> List[Any]: """simple docstring""" a__ : Union[str, Any] = SwiftFormerModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() a__ : str = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _snake_case ( self , snake_case , snake_case , snake_case ) -> str: """simple docstring""" a__ : int = self.num_labels a__ : Union[str, Any] = SwiftFormerForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() a__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) a__ : Optional[int] = SwiftFormerForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() a__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Tuple = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self ) -> Dict: """simple docstring""" ((a__) , (a__) , (a__)) : Dict = self.prepare_config_and_inputs() a__ : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ): _UpperCamelCase : Union[str, Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () _UpperCamelCase : Tuple = ( {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification} if is_torch_available() else {} ) _UpperCamelCase : int = False _UpperCamelCase : Any = False _UpperCamelCase : Tuple = False _UpperCamelCase : int = False _UpperCamelCase : Optional[int] = False def 
_snake_case ( self ) -> List[str]: """simple docstring""" a__ : int = SwiftFormerModelTester(self ) a__ : str = ConfigTester( self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def _snake_case ( self ) -> Tuple: """simple docstring""" pass def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ , a__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Union[str, Any] = model_class(__UpperCamelCase ) a__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def _snake_case ( self ) -> List[str]: """simple docstring""" a__ , a__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Tuple = model_class(__UpperCamelCase ) a__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : List[Any] = [*signature.parameters.keys()] a__ : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def _snake_case ( self ) -> List[Any]: """simple docstring""" for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : List[str] = SwiftFormerModel.from_pretrained(__UpperCamelCase ) 
self.assertIsNotNone(__UpperCamelCase ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" pass def _snake_case ( self ) -> List[Any]: """simple docstring""" def check_hidden_states_output(snake_case , snake_case , snake_case ): a__ : Dict = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): a__ : str = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) a__ : Tuple = outputs.hidden_states a__ : Dict = 8 self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__UpperCamelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : str = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a__ : Tuple = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def _snake_case ( self ) -> Dict: """simple docstring""" def _config_zero_init(snake_case ): a__ : Union[str, Any] = copy.deepcopy(__UpperCamelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__UpperCamelCase , __UpperCamelCase , 1E-10 ) if isinstance(getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ): a__ : Union[str, Any] = 
_config_zero_init(getattr(__UpperCamelCase , __UpperCamelCase ) ) setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return configs_no_init a__ , a__ : Any = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: a__ : Union[str, Any] = model_class(config=__UpperCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" pass def _A ( ): a__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> Dict: """simple docstring""" return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : int = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__UpperCamelCase ) a__ : Optional[Any] = self.default_image_processor a__ : List[str] = prepare_img() a__ : Optional[int] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): a__ : Optional[int] = model(**__UpperCamelCase ) # verify the logits a__ : Any = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) a__ : int = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
707
from PIL import Image def _A ( lowerCamelCase , lowerCamelCase ): def brightness(lowerCamelCase ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("level must be between -255.0 (black) and 255.0 (white)" ) return img.point(lowerCamelCase ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
629
0
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( A_ ,unittest.TestCase ): _UpperCamelCase : Any = RobertaTokenizer _UpperCamelCase : Optional[int] = RobertaTokenizerFast _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : List[str] = {'''cls_token''': '''<s>'''} def _snake_case ( self ) -> int: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a__ : Dict = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] a__ : Any = dict(zip(snake_case , range(len(snake_case ) ) ) ) a__ : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] a__ : int = {"""unk_token""": """<unk>"""} a__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case ) ) def _snake_case ( self , **snake_case ) -> Any: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case ) def _snake_case ( self , **snake_case ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def 
_snake_case ( self , snake_case ) -> Dict: """simple docstring""" a__ : Union[str, Any] = """lower newer""" a__ : Optional[int] = """lower newer""" return input_text, output_text def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = """lower newer""" a__ : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] a__ : List[Any] = tokenizer.tokenize(snake_case ) # , add_prefix_space=True) self.assertListEqual(snake_case , snake_case ) a__ : Dict = tokens + [tokenizer.unk_token] a__ : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case ) def _snake_case ( self ) -> Any: """simple docstring""" a__ : int = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=snake_case ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=snake_case ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Optional[int] = self.tokenizer_class.from_pretrained("roberta-base" ) a__ : Optional[Any] = tokenizer.encode("sequence builders" , add_special_tokens=snake_case ) a__ : Any = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case ) a__ : int = tokenizer.encode( "sequence builders" , add_special_tokens=snake_case , add_prefix_space=snake_case ) a__ : Union[str, Any] = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=snake_case , add_prefix_space=snake_case ) a__ : int = tokenizer.build_inputs_with_special_tokens(snake_case ) a__ : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Any = self.get_tokenizer() a__ : Optional[int] = """Encode this sequence.""" a__ : int = tokenizer.byte_encoder[""" """.encode("utf-8" )[0]] # Testing encoder arguments a__ : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case ) a__ : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(snake_case , snake_case ) a__ : str = tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case ) a__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(snake_case , snake_case ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) a__ : Any = tokenizer.encode(snake_case , add_special_tokens=snake_case ) a__ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(snake_case , snake_case ) # Testing spaces after special tokens a__ : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"mask_token": 
AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case )} ) # mask token has a left space a__ : List[str] = tokenizer.convert_tokens_to_ids(snake_case ) a__ : Tuple = """Encode <mask> sequence""" a__ : str = """Encode <mask>sequence""" a__ : int = tokenizer.encode(snake_case ) a__ : Union[str, Any] = encoded.index(snake_case ) a__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(snake_case , snake_case ) a__ : List[Any] = tokenizer.encode(snake_case ) a__ : Optional[Any] = encoded.index(snake_case ) a__ : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(snake_case , snake_case ) def _snake_case ( self ) -> Dict: """simple docstring""" pass def _snake_case ( self ) -> Tuple: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) a__ : int = self.tokenizer_class.from_pretrained(snake_case , **snake_case ) a__ : Optional[int] = """A, <mask> AllenNLP sentence.""" a__ : Optional[int] = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case ) a__ : Any = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) a__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) a__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt 
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case ) a__ : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , snake_case ) self.assertEqual(post_processor_state["add_prefix_space"] , snake_case ) self.assertEqual(post_processor_state["trim_offsets"] , snake_case ) def _snake_case ( self ) -> Tuple: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` a__ : List[Any] = F"""{text_of_1_token} {text_of_1_token}""" a__ : List[str] = self.rust_tokenizer_class.from_pretrained( snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case ) a__ : Optional[int] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(snake_case ) + 1, len(snake_case ) + 1 + len(snake_case )) , ) a__ : List[Any] = 
self.rust_tokenizer_class.from_pretrained( snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case ) a__ : Optional[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(snake_case ) + 1, len(snake_case ) + 1 + len(snake_case )) , ) a__ : List[str] = self.rust_tokenizer_class.from_pretrained( snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case ) a__ : Optional[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(snake_case ), len(snake_case ) + 1 + len(snake_case )) , ) a__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case ) a__ : Optional[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(snake_case ), len(snake_case ) + 1 + len(snake_case )) , ) a__ : Tuple = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case ) a__ : Optional[Any] = 
tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(snake_case ) + 1, 1 + len(snake_case ) + 1 + len(snake_case )) , ) a__ : Dict = self.rust_tokenizer_class.from_pretrained( snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case ) a__ : Union[str, Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(snake_case ), 1 + len(snake_case ) + 1 + len(snake_case )) , ) a__ : List[str] = self.rust_tokenizer_class.from_pretrained( snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case ) a__ : List[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(snake_case ), 1 + len(snake_case ) + 1 + len(snake_case )) , )
708
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


# Download URLs for the original OpenAI Whisper checkpoints, keyed by model
# name. The second-to-last path component of each URL is the expected SHA256
# digest of the file, which `_download` verifies.
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    """Drop top-level container keys that must not be loaded as weights (in place)."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        # BUG FIX: pop needs a default so a missing key is not an error.
        state_dict.pop(k, None)


# Substring renames mapping OpenAI checkpoint parameter names to the
# Hugging Face WhisperForConditionalGeneration naming scheme.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename every key of *s_dict* through WHISPER_MAPPING (in place) and return it."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the weights of embedding *emb* (for weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url, root=os.path.join(os.path.expanduser("~"), ".cache", "whisper")):
    """Download *url* into *root* (verifying its SHA256) and return the raw bytes.

    The expected digest is taken from the second-to-last URL path component.
    A previously downloaded file is reused when its checksum still matches.

    NOTE(review): the original called this with a single argument, so the
    cache directory default is an assumption — confirm the intended location.

    Raises:
        RuntimeError: if the target path is not a regular file, or if the
            checksum still mismatches after re-downloading.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        # BUG FIX: hashlib.sha256 (the original referenced a nonexistent
        # `hashlib.shaaaa`, which raises AttributeError at runtime).
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint to a Hugging Face model directory.

    Args:
        checkpoint_path: either a model name from `_MODELS` (downloaded on
            demand) or a path to a local ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for `save_pretrained`.

    Raises:
        ValueError: if weights other than the positional embeddings are
            missing after loading the converted state dict.
    """
    if ".pt" not in checkpoint_path:
        # A model name was given: fetch the raw bytes and deserialize them.
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")

    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Saved before renaming so the output projection can be tied/copied below.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    tie_embeds = True
    # Both FFN dims are read off an actual weight rather than the dims dict.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # BUG FIX: the decoder head count comes from n_text_head; the original
        # used n_text_state (the hidden size), which is not a head count.
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
629
0
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example.

    Attributes:
        languages: language codes; every example must provide exactly these keys.
        id: optional feature identifier.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    # BUG FIX: `init`/`repr` were set to the undefined name `__A`; the field
    # is internal metadata and must be excluded from __init__ and repr.
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string column per (sorted) language code.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into one string Value per language."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example.

    Attributes:
        languages: optional closed set of permitted language codes.
        num_languages: derived; number of permitted languages (or None).
        id: optional feature identifier.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # BUG FIX: the original bound these to throwaway locals, leaving the
        # instance attributes unset; they must be normalized on the instance.
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Validate and encode one example into parallel language/translation lists.

        Args:
            translation_dict: mapping of language code to a translation string
                (or to a list of alternative translation strings).

        Returns:
            dict with sorted, aligned "language" and "translation" tuples.

        Raises:
            ValueError: if the example uses a language outside `self.languages`.
        """
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        # BUG FIX: the unpacked pair was collapsed into a single local, leaving
        # `languages`/`translations` undefined in the return statement.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into Sequence-of-string features for language and translation."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
709
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for an Informer time-series forecasting model.

    BUG FIX: the previous version named every __init__ parameter identically
    (a SyntaxError) and bound all values to throwaway locals instead of
    instance attributes, so the config object carried no state. Parameter
    names below are restored from the upstream Informer configuration.
    """

    model_type = "informer"
    # Standard-name aliases resolved by PretrainedConfig.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer-specific arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ) -> None:
        # Time-series-specific configuration
        self.prediction_length = prediction_length
        # Default the conditioning window to the forecast horizon.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic embedding size per categorical feature.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Total width of the per-step feature vector fed to the model.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
629
0
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : int = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( a__ ,unittest.TestCase ): _UpperCamelCase : List[str] = XLNetTokenizer _UpperCamelCase : Tuple = XLNetTokenizerFast _UpperCamelCase : int = True _UpperCamelCase : Optional[int] = True def _snake_case ( self ) -> Dict: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing a__ : str = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : List[Any] = "<s>" a__ : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ ) def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<eod>" ) self.assertEqual(len(lowerCamelCase_ ) , 1_006 ) def _snake_case ( self ) -> int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Union[str, Any] = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) a__ : Dict = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [285, 46, 10, 170, 382] ) a__ : Union[str, Any] = 
tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) a__ : int = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : List[str] = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ ) a__ : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + "", "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Dict = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ ) a__ : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) @slow def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Optional[Any] = XLNetTokenizer.from_pretrained("xlnet-base-cased" ) a__ : str = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase_ ) a__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase_ ) a__ : Any = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) a__ : Any = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : List[str] = {"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 
14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase_ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
710
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

# Tiny checkpoints keep these tests fast; the two constants must be distinct
# (the original code bound both strings to the same name, clobbering the first).
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    """Tests for create_student_by_copying_alternating_layers.

    Each test method has a distinct ``test_*`` name so pytest/unittest actually
    collects it (previously all methods shared one name and shadowed each other).
    """

    @cached_property
    def teacher_config(self):
        # Cached BART teacher config, used to compare layer counts below.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_student_t5(self):
        # e=1, d=1: both encoder and decoder shrunk to a single layer.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # d=None: decoder depth is left unspecified; only checks this doesn't raise.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        # With d=None the decoder should keep the teacher's full depth.
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # Requesting neither encoder nor decoder depth is invalid.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
629
0
import unittest

from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MobileBertForMaskedLM,
        MobileBertForMultipleChoice,
        MobileBertForNextSentencePrediction,
        MobileBertForPreTraining,
        MobileBertForQuestionAnswering,
        MobileBertForSequenceClassification,
        MobileBertForTokenClassification,
        MobileBertModel,
    )


class MobileBertModelTester:
    """Builds a tiny MobileBERT config plus random inputs and verifies the
    output shapes of every task head.

    The original signatures repeated the parameter name ``snake_case``
    (a SyntaxError) and bodies referenced the undefined ``UpperCamelCase__``;
    both are restored to real names here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Draw random ids/masks/labels matching the tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions (with/without mask and type ids).
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Each input is tiled along a new "choice" dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # Pretraining models additionally need MLM labels and an NSP label.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    # Fix: the token list was previously also passed as ``device=``.
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
711
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

# Four distinct constants; previously all four strings were bound to one name.
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Runs the same prompt through Stable Diffusion v1.1-v1.4 and returns the
    four resulting images for side-by-side comparison.

    The components passed to ``__init__`` (vae, text_encoder, ...) are used to
    assemble the v1.4 pipeline; v1.1-v1.3 are downloaded from the Hub.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()  # fix: was the no-op typo ``super()._init_()``

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        # Public (non-underscored) config entries resolved to their modules.
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to save memory."""
        if slice_size == "auto":
            # Half the attention head size is usually a good trade-off between
            # speed and memory.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute it in one step)."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.1 checkpoint."""
        return self.pipe1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.2 checkpoint."""
        return self.pipe2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.3 checkpoint."""
        return self.pipe3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.4 checkpoint."""
        return self.pipe4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run the prompt through all four checkpoints and bundle the results.

        Raises:
            ValueError: if ``height`` or ``width`` is not divisible by 8.
        """
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
629
0
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def _A(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Emit a FutureWarning for deprecated arguments/attributes and return their values.

    Each positional item is a ``(attribute, version_name, message)`` tuple (a single
    tuple may be passed un-nested). ``take_from`` is either a kwargs dict the
    deprecated values are popped from, or an object they are read from.

    Returns:
        Nothing, the single popped value, or a tuple of popped values.

    Raises:
        ValueError: if the current library version already reached ``version_name``
            (the deprecation shim should have been deleted).
        TypeError: if ``take_from`` is a dict with leftover (unknown) keys.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow calling with a single (attribute, version, message) tuple directly.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        # base_version strips pre/dev suffixes so the comparison is stable.
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Any keys left in the dict were not declared deprecated -> caller typo.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
712
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665 def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ): if fluid_density <= 0: raise ValueError("Impossible fluid density" ) if volume < 0: raise ValueError("Impossible Object volume" ) if gravity <= 0: raise ValueError("Impossible Gravity" ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
629
0
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on both diagonals of an n-by-n number spiral
    (Project Euler problem 28). ``n`` must be odd.

    Each ring i contributes the four corners 4*(2i+1)^2 - 6*(2i), starting
    from the center value 1.
    """
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
713
from __future__ import annotations

from random import random


class Node:
    """Treap node: a BST value plus a random heap priority."""

    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.prior = random()  # heap priority; random draws keep the tree balanced in expectation
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (nodes < value, nodes >= value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root (and its right subtree) belong to the right part.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Root (and its left subtree) belong to the left part.
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` <= every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove all nodes equal to `value` and return the new root."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the values in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated command string: `+x` inserts x, `-x` erases x."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive REPL over a treap; 'q' quits."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
629
0
# NOTE(review): identifiers in this test module are machine-obfuscated. Every test
# method is named `_snake_case` (so only the last definition survives on the class),
# the base class `_a` and the argument placeholder `_A` are undefined, and `a__`
# assignments shadow the real local names the bodies later read (e.g. `config`,
# `scheduler`, `sample`). The file cannot run as-is; comments below document intent.
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class __lowerCAmelCase(_a):
    # Scheduler class under test and the forward-arg grid the common harness sweeps.
    _UpperCamelCase : List[Any] = (DDIMParallelScheduler,)
    _UpperCamelCase : str = (("""eta""", 0.0), ("""num_inference_steps""", 50))

    # Base scheduler config; kwargs override individual fields.
    def _snake_case(self, **snake_case) -> Union[str, Any]:
        """simple docstring"""
        a__ : Optional[Any] = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**_A)
        return config

    # Runs a full 10-step denoising loop and returns the final sample.
    def _snake_case(self, **snake_case) -> Union[str, Any]:
        """simple docstring"""
        a__ : Optional[int] = self.scheduler_classes[0]
        a__ : int = self.get_scheduler_config(**_A)
        a__ : Any = scheduler_class(**_A)
        a__, a__ : List[str] = 10, 0.0
        a__ : Optional[int] = self.dummy_model()
        a__ : List[Any] = self.dummy_sample_deter
        scheduler.set_timesteps(_A)
        for t in scheduler.timesteps:
            a__ : Optional[int] = model(_A, _A)
            a__ : Dict = scheduler.step(_A, _A, _A, _A).prev_sample
        return sample

    def _snake_case(self) -> List[str]:
        """simple docstring"""
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=_A)

    # steps_offset=1 shifts the 5-step schedule to end at timestep 1.
    def _snake_case(self) -> Optional[int]:
        """simple docstring"""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=_A)
        a__ : Union[str, Any] = self.scheduler_classes[0]
        a__ : List[Any] = self.get_scheduler_config(steps_offset=1)
        a__ : Union[str, Any] = scheduler_class(**_A)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def _snake_case(self) -> str:
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=_A, beta_end=_A)

    def _snake_case(self) -> int:
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_A)

    def _snake_case(self) -> Union[str, Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_A)

    def _snake_case(self) -> Tuple:
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_A)

    def _snake_case(self) -> str:
        """simple docstring"""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=_A)

    def _snake_case(self) -> List[str]:
        """simple docstring"""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=_A)

    # Thresholding is exercised across thresholds and prediction types.
    def _snake_case(self) -> List[Any]:
        """simple docstring"""
        self.check_over_configs(thresholding=_A)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=_A,
                    prediction_type=_A,
                    sample_max_value=_A,
                )

    def _snake_case(self) -> str:
        """simple docstring"""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=_A)

    def _snake_case(self) -> Optional[Any]:
        """simple docstring"""
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=_A, num_inference_steps=_A)

    def _snake_case(self) -> Dict:
        """simple docstring"""
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=_A, eta=_A)

    # Golden values for the private _get_variance(t, prev_t) helper.
    def _snake_case(self) -> List[str]:
        """simple docstring"""
        a__ : Tuple = self.scheduler_classes[0]
        a__ : Tuple = self.get_scheduler_config()
        a__ : str = scheduler_class(**_A)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14_771)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32_460)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00_979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1E-5

    # batch_step_no_noise on a stacked batch of three shifted samples.
    def _snake_case(self) -> Union[str, Any]:
        """simple docstring"""
        a__ : str = self.scheduler_classes[0]
        a__ : Optional[Any] = self.get_scheduler_config()
        a__ : Tuple = scheduler_class(**_A)
        a__, a__ : Tuple = 10, 0.0
        scheduler.set_timesteps(_A)
        a__ : Union[str, Any] = self.dummy_model()
        a__ : Any = self.dummy_sample_deter
        a__ : int = self.dummy_sample_deter + 0.1
        a__ : int = self.dummy_sample_deter - 0.1
        a__ : Optional[int] = samplea.shape[0]
        a__ : List[str] = torch.stack([samplea, samplea, samplea], dim=0)
        a__ : Any = torch.arange(_A)[0:3, None].repeat(1, _A)
        a__ : Union[str, Any] = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        a__ : List[Any] = scheduler.batch_step_no_noise(_A, timesteps.flatten(0, 1), samples.flatten(0, 1), _A)
        a__ : List[Any] = torch.sum(torch.abs(_A))
        a__ : Tuple = torch.mean(torch.abs(_A))
        assert abs(result_sum.item() - 1_147.7_904) < 1E-2
        assert abs(result_mean.item() - 0.4_982) < 1E-3

    # Full-loop goldens: default config.
    def _snake_case(self) -> Optional[int]:
        """simple docstring"""
        a__ : Dict = self.full_loop()
        a__ : str = torch.sum(torch.abs(_A))
        a__ : Any = torch.mean(torch.abs(_A))
        assert abs(result_sum.item() - 172.0_067) < 1E-2
        assert abs(result_mean.item() - 0.223_967) < 1E-3

    # Full-loop goldens: v-prediction.
    def _snake_case(self) -> Optional[Any]:
        """simple docstring"""
        a__ : int = self.full_loop(prediction_type="v_prediction")
        a__ : List[str] = torch.sum(torch.abs(_A))
        a__ : List[Any] = torch.mean(torch.abs(_A))
        assert abs(result_sum.item() - 52.5_302) < 1E-2
        assert abs(result_mean.item() - 0.0_684) < 1E-3

    # Full-loop goldens: set_alpha_to_one + beta_start=0.01 (two variants with
    # different expected values — presumably leading vs trailing spacing; the
    # distinguishing kwargs were lost to obfuscation — TODO confirm upstream).
    def _snake_case(self) -> Optional[int]:
        """simple docstring"""
        a__ : Dict = self.full_loop(set_alpha_to_one=_A, beta_start=0.01)
        a__ : int = torch.sum(torch.abs(_A))
        a__ : Union[str, Any] = torch.mean(torch.abs(_A))
        assert abs(result_sum.item() - 149.8_295) < 1E-2
        assert abs(result_mean.item() - 0.1_951) < 1E-3

    def _snake_case(self) -> Optional[int]:
        """simple docstring"""
        a__ : str = self.full_loop(set_alpha_to_one=_A, beta_start=0.01)
        a__ : str = torch.sum(torch.abs(_A))
        a__ : Dict = torch.mean(torch.abs(_A))
        assert abs(result_sum.item() - 149.0_784) < 1E-2
        assert abs(result_mean.item() - 0.1_941) < 1E-3
714
# NOTE(review): identifiers here are machine-obfuscated: test methods all share the
# name `_snake_case`, mixin bases collapsed to `_UpperCamelCase`, and `a__` targets
# shadow the locals the bodies later read (`components`, `generator`, `pipe`, ...).
# The file cannot run as-is; comments document the intended test structure.
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)

enable_full_determinism()


class __lowerCAmelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, unittest.TestCase):
    # Pipeline under test and the parameter sets the common mixins sweep.
    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False

    # Builds tiny prior + noising + denoising components for fast CPU tests.
    def _snake_case(self) -> List[str]:
        """simple docstring"""
        a__ : Any = 32
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0)
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=snake_case,
                projection_dim=snake_case,
                intermediate_size=37,
                layer_norm_eps=1E-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )
        torch.manual_seed(0)
        a__ : int = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=snake_case,
            num_layers=1,
        )
        torch.manual_seed(0)
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=snake_case,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )
        # regular denoising components
        torch.manual_seed(0)
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case)
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=snake_case,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1E-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )
        torch.manual_seed(0)
        a__ : Any = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=snake_case,
            layers_per_block=1,
            upcast_attention=snake_case,
            use_linear_projection=snake_case,
        )
        torch.manual_seed(0)
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00_085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=snake_case,
            steps_offset=1,
        )
        torch.manual_seed(0)
        a__ : Optional[int] = AutoencoderKL()
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    # Deterministic dummy inputs; mps needs a global seed instead of a Generator.
    def _snake_case(self, snake_case, snake_case=0) -> Dict:
        """simple docstring"""
        if str(snake_case).startswith("mps"):
            a__ : Union[str, Any] = torch.manual_seed(snake_case)
        else:
            a__ : List[str] = torch.Generator(device=snake_case).manual_seed(snake_case)
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def _snake_case(self) -> List[str]:
        """simple docstring"""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case)

    def _snake_case(self) -> int:
        """simple docstring"""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case)


@slow
@require_torch_gpu
class __lowerCAmelCase(unittest.TestCase):
    # Free GPU memory between slow integration tests.
    def _snake_case(self) -> List[str]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Compares a full fp16 pipeline run against a stored reference image.
    def _snake_case(self) -> Tuple:
        """simple docstring"""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.floataa)
        pipe.to(snake_case)
        pipe.set_progress_bar_config(disable=snake_case)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu").manual_seed(0)
        a__ : Dict = pipe("anime turle", generator=snake_case, output_type="np")
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case, snake_case)

    # Verifies peak GPU memory stays under 7 GB with offloading enabled.
    def _snake_case(self) -> Tuple:
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.floataa)
        a__ : Union[str, Any] = pipe.to(snake_case)
        pipe.set_progress_bar_config(disable=snake_case)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
629
0
def odd_even_sort(input_list: list) -> list:
    """Sort ``input_list`` in place with odd-even transposition sort.

    Alternates compare-swap passes over even-indexed and odd-indexed
    neighbour pairs until a full sweep makes no swap. O(n^2) worst case.

    >>> odd_even_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> odd_even_sort([])
    []
    """
    is_sorted = False
    while not is_sorted:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    # inputing elements of the list in one line
    input_list = [int(x) for x in input().split()]
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
715
# DistilBERT package __init__: declares the lazy-import structure so heavy
# framework backends (torch/tf/flax) are only imported when actually used.
# NOTE(review): obfuscation replaced the `_import_structure[...]` bindings with
# assignments to `SCREAMING_SNAKE_CASE__`, so `_import_structure` referenced at
# the bottom is undefined here — the original file populated one dict throughout.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Backend-independent modules, always importable.
SCREAMING_SNAKE_CASE__ : str = {
    """configuration_distilbert""": [
        """DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """DistilBertConfig""",
        """DistilBertOnnxConfig""",
    ],
    """tokenization_distilbert""": ["""DistilBertTokenizer"""],
}

# Fast tokenizer only when the `tokenizers` library is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Tuple = [
        """DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DistilBertForMaskedLM""",
        """DistilBertForMultipleChoice""",
        """DistilBertForQuestionAnswering""",
        """DistilBertForSequenceClassification""",
        """DistilBertForTokenClassification""",
        """DistilBertModel""",
        """DistilBertPreTrainedModel""",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : List[Any] = [
        """TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFDistilBertForMaskedLM""",
        """TFDistilBertForMultipleChoice""",
        """TFDistilBertForQuestionAnswering""",
        """TFDistilBertForSequenceClassification""",
        """TFDistilBertForTokenClassification""",
        """TFDistilBertMainLayer""",
        """TFDistilBertModel""",
        """TFDistilBertPreTrainedModel""",
    ]

# Flax model classes.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Any = [
        """FlaxDistilBertForMaskedLM""",
        """FlaxDistilBertForMultipleChoice""",
        """FlaxDistilBertForQuestionAnswering""",
        """FlaxDistilBertForSequenceClassification""",
        """FlaxDistilBertForTokenClassification""",
        """FlaxDistilBertModel""",
        """FlaxDistilBertPreTrainedModel""",
    ]

# During static type checking import everything eagerly; at runtime install a
# _LazyModule that resolves attributes on first access.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
629
0
from __future__ import annotations

from collections.abc import Iterator


class Node:
    """A binary-tree node holding an integer value."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums all node values of a binary tree via depth-first traversal.

    Iterating the object yields the single total, e.g.
    ``next(iter(BinaryTreeNodeSum(root)))``.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of *node*'s subtree (0 for an empty subtree)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
716
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling random points in the unit square.

    Prints the estimate, the reference value, and the absolute error.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""")
    print(f"""The numpy value of pi is {pi}""")
    print(f"""The total error is {abs(pi - pi_estimate)}""")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of *function_to_integrate*
    over [min_value, max_value]: mean sample value times interval width.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator on y=x, whose integral is known in closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {expected_value}""")
    print(f"""Total error is {abs(estimated_value - expected_value)}""")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi by integrating a quarter circle of radius 2 over [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {pi}""")
    print(f"""Total error is {abs(estimated_value - pi)}""")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
629
0
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an integer expression in postfix (RPN) notation.

    Tokens are operand strings or one of ``+ - * /``; division truncates
    toward zero (C-style), unlike Python's floor division. An empty token
    list evaluates to 0.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            # b is the top of stack (right operand), a the one beneath it.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Truncate toward zero: floor division rounds toward -inf,
                # so bump the quotient by one for inexact mixed-sign results.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
717
# EfficientFormer original-checkpoint -> transformers conversion script.
# NOTE(review): identifiers are machine-obfuscated — all four functions are named
# `_A` (each def shadows the previous) and `a__` targets shadow the real locals
# (`new_name`, `match`, `trimmed_name`, `model`, ...) the bodies later read, while
# the bottom calls `convert_efficientformer_checkpoint`, which is never defined.
# The script cannot run as-is; comments below document the intended flow.
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


# Maps one original state-dict key to its transformers-side name.
def _A(lowerCamelCase, lowerCamelCase):
    a__ : Dict = old_name
    if "patch_embed" in old_name:
        a__, a__, a__ : Union[str, Any] = old_name.split(".")
        if layer == "0":
            a__ : Union[str, Any] = old_name.replace("0", "convolution1")
        elif layer == "1":
            a__ : Dict = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            a__ : List[str] = old_name.replace("3", "convolution2")
        else:
            a__ : Optional[Any] = old_name.replace("4", "batchnorm_after")
    if "network" in old_name and re.search(r"\d\.\d", lowerCamelCase):
        a__ : List[str] = r"\b\d{2}\b"
        if bool(re.search(lowerCamelCase, lowerCamelCase)):
            a__ : Optional[int] = re.search(r"\d\.\d\d.", lowerCamelCase).group()
        else:
            a__ : Any = re.search(r"\d\.\d.", lowerCamelCase).group()
        if int(match[0]) < 6:
            # Intermediate (4D) stages.
            a__ : List[Any] = old_name.replace(lowerCamelCase, "")
            a__ : int = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            a__ : List[Any] = "intermediate_stages." + trimmed_name
        else:
            # Last stage: mixed meta4D / meta3D blocks.
            a__ : Union[str, Any] = old_name.replace(lowerCamelCase, "")
            if int(match[2]) < num_meta4D_last_stage:
                a__ : Optional[Any] = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                a__ : Union[str, Any] = str(int(match[2]) - num_meta4D_last_stage)
                a__ : str = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    a__ : List[str] = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    a__ : Optional[int] = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    a__ : List[str] = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    a__ : Any = trimmed_name.replace("fc2", "linear_out")
            a__ : Any = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", lowerCamelCase):
        a__ : List[str] = old_name.replace("network", "intermediate_stages")
    if "fc" in new_name:
        a__ : str = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        a__ : str = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        a__ : Any = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        a__ : Optional[int] = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        a__ : Tuple = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        a__ : Optional[int] = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        a__ : Tuple = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        a__ : Union[str, Any] = new_name.replace("norm", "layernorm")
        a__ : Optional[int] = "efficientformer." + new_name
    else:
        a__ : List[Any] = "efficientformer.encoder." + new_name
    return new_name


# Rewrites every key of the checkpoint through the renamer above.
def _A(lowerCamelCase, lowerCamelCase):
    for key in checkpoint.copy().keys():
        a__ : Optional[Any] = checkpoint.pop(lowerCamelCase)
        a__ : Dict = val
    return checkpoint


# Downloads the standard COCO sanity-check image.
def _A():
    a__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
    a__ : List[Any] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw)
    return image


# Converts, verifies against golden logits, saves, and optionally pushes to hub.
def _A(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
    a__ : List[str] = torch.load(lowerCamelCase, map_location="cpu")["model"]
    a__ : str = EfficientFormerConfig.from_json_file(lowerCamelCase)
    a__ : int = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase)
    a__ : Optional[Any] = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    a__ : Tuple = config.depths[-1] - config.num_metaad_blocks + 1
    a__ : Union[str, Any] = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase)
    model.load_state_dict(lowerCamelCase)
    model.eval()
    a__ : Dict = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    a__ : str = prepare_img()
    a__ : Dict = 256
    a__ : Union[str, Any] = 224
    a__ : List[str] = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    a__ : List[str] = processor(images=lowerCamelCase, return_tensors="pt").pixel_values
    # original processing pipeline
    a__ : List[str] = Compose(
        [
            Resize(lowerCamelCase, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(lowerCamelCase),
            ToTensor(),
            Normalize(lowerCamelCase, lowerCamelCase),
        ]
    )
    a__ : List[Any] = image_transforms(lowerCamelCase).unsqueeze(0)
    # New processor must reproduce the original preprocessing exactly.
    assert torch.allclose(lowerCamelCase, lowerCamelCase)
    a__ : Optional[int] = model(lowerCamelCase)
    a__ : Any = outputs.logits
    a__ : Optional[Any] = (1, 1000)
    # Golden first-10 logits per model size.
    if "l1" in model_name:
        a__ : Tuple = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], lowerCamelCase, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        a__ : int = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], lowerCamelCase, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        a__ : Optional[Any] = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )
    # Save Checkpoints
    Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
    model.save_pretrained(lowerCamelCase)
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(lowerCamelCase)
    print(F"""Processor successfuly saved at {pytorch_dump_path}""")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add model",
            use_temp_dir=lowerCamelCase,
        )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add image processor",
            use_temp_dir=lowerCamelCase,
        )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    parser.set_defaults(push_to_hub=True)
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
629
0
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __lowerCAmelCase : def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=64 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ) -> List[str]: """simple docstring""" a__ : str = parent a__ : List[Any] = batch_size a__ : Dict = seq_length a__ : Optional[Any] = is_training a__ : Any = use_input_mask a__ : List[Any] = use_token_type_ids a__ : str = use_labels a__ : List[str] = vocab_size a__ : List[str] = hidden_size a__ : Union[str, Any] = embedding_size a__ : int = num_hidden_layers a__ : str = num_attention_heads a__ : Any = intermediate_size a__ : Dict = hidden_act a__ : Optional[int] = hidden_dropout_prob a__ : int = attention_probs_dropout_prob a__ : str = max_position_embeddings a__ : List[Any] = type_vocab_size a__ : str = type_sequence_label_size a__ : Dict = initializer_range a__ : Optional[Any] = num_labels a__ : Optional[Any] = num_choices a__ 
: Union[str, Any] = scope def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : str = None if self.use_input_mask: a__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a__ : str = None if self.use_token_type_ids: a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a__ : int = None a__ : Optional[Any] = None a__ : Optional[Any] = None if self.use_labels: a__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a__ : Any = ids_tensor([self.batch_size] , self.num_choices ) a__ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ) -> Dict: """simple docstring""" return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Dict: """simple docstring""" a__ : int = MegatronBertModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) a__ : Optional[int] = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) a__ : int = model(lowerCamelCase__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Union[str, Any]: """simple docstring""" a__ : List[str] = MegatronBertForMaskedLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> str: """simple docstring""" a__ : Any = MegatronBertForCausalLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Tuple: """simple docstring""" a__ : str = MegatronBertForNextSentencePrediction(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Tuple: """simple docstring""" a__ : Union[str, Any] = MegatronBertForPreTraining(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model( lowerCamelCase__ , 
attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , next_sentence_label=lowerCamelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]: """simple docstring""" a__ : Dict = MegatronBertForQuestionAnswering(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : int = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Tuple: """simple docstring""" a__ : Dict = self.num_labels a__ : Any = MegatronBertForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Any = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> int: """simple docstring""" a__ : List[str] = self.num_labels a__ : str = MegatronBertForTokenClassification(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( 
self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[Any]: """simple docstring""" a__ : Optional[Any] = self.num_choices a__ : Tuple = MegatronBertForMultipleChoice(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : Any = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : str = self.prepare_config_and_inputs() ( a__ ) : Any = config_and_inputs a__ : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __lowerCAmelCase ,__lowerCAmelCase ,unittest.TestCase ): _UpperCamelCase : List[Any] = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) _UpperCamelCase : Optional[Any] = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : Tuple = True # test_resize_embeddings 
= False _UpperCamelCase : List[str] = False def _snake_case ( self , snake_case , snake_case , snake_case=False ) -> List[Any]: """simple docstring""" a__ : List[Any] = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) if return_labels: if model_class in get_values(lowerCamelCase__ ): a__ : Dict = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase__ ) a__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) return inputs_dict def _snake_case ( self ) -> Dict: """simple docstring""" a__ : int = MegatronBertModelTester(self ) a__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*lowerCamelCase__ ) def _snake_case ( self ) -> Optional[Any]: """simple docstring""" a__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCamelCase__ ) def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCamelCase__ ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCamelCase__ ) def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCamelCase__ ) def _snake_case ( self ) -> 
Optional[Any]: """simple docstring""" a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCamelCase__ ) def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCamelCase__ ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCamelCase__ ) def _A ( lowerCamelCase ): return torch.tensor( lowerCamelCase , dtype=torch.long , device=lowerCamelCase , ) SCREAMING_SNAKE_CASE__ : Dict = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow @unittest.skip("Model is not available." ) def _snake_case ( self ) -> Dict: """simple docstring""" a__ : Any = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: a__ : Union[str, Any] = os.path.join(os.environ["MYDIR"] , lowerCamelCase__ ) a__ : Dict = MegatronBertModel.from_pretrained(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.half() a__ : str = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] ) with torch.no_grad(): a__ : Any = model(lowerCamelCase__ )[0] a__ : int = torch.Size((1, 9, 1_024) ) self.assertEqual(output.shape , lowerCamelCase__ ) a__ : List[str] = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728] for ii in range(3 ): for jj in range(3 ): a__ : Optional[Any] = output[0, ii, jj] a__ : Dict = expected[3 * ii + jj] a__ : str = '''ii={} jj={} a={} b={}'''.format(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) self.assertTrue(math.isclose(lowerCamelCase__ , lowerCamelCase__ , rel_tol=lowerCamelCase__ , abs_tol=lowerCamelCase__ ) , msg=lowerCamelCase__ )
718
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" LXMERT tokenizer backed by HuggingFace *tokenizers* (WordPiece,
    BERT-style pre/post-processing).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        """Build the fast tokenizer and make sure the backend normalizer
        matches the requested lowercasing / accent-stripping options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # The serialized tokenizer may have been created with different
        # normalization settings; if so, rebuild the normalizer to match.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` token-id lists."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0s for `[CLS] A [SEP]`, 1s for `B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend WordPiece vocabulary; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
629
0
# Ternary search over a sorted sequence: each step probes two interior
# points and discards roughly a third of the remaining range.
# Below `precision` elements the search falls back to a linear scan.
# It is recommended to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linearly scan array[left:right] (right exclusive); return the index
    of `target`, or -1 if it is absent."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search of a *sorted* `array`.

    Returns the index of `target`, or -1 if not found.
    """
    left = 0
    right = len(array)  # exclusive upper bound
    # Keep both probe indices strictly inside [left, right) so large
    # arrays cannot index past the end.
    while right - left >= precision:
        third = (right - left) // 3
        one_third = left + third
        two_third = right - third - 1
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third
    return lin_search(left, right, array, target)


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of sorted `array` over [left, right]
    (right *inclusive*). Returns the index of `target`, or -1."""
    if right - left + 1 < precision:
        # lin_search's right bound is exclusive, so cover index `right` too.
        return lin_search(left, right + 1, array, target)
    third = (right - left) // 3
    one_third = left + third
    two_third = right - third
    if array[one_third] == target:
        return one_third
    if array[two_third] == target:
        return two_third
    if target < array[one_third]:
        return rec_ternary_search(left, one_third - 1, array, target)
    if array[two_third] < target:
        return rec_ternary_search(two_third + 1, right, array, target)
    return rec_ternary_search(one_third + 1, two_third - 1, array, target)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
719
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    """
    Configuration for a MobileNetV2 model.

    Args:
        num_channels: Number of input image channels.
        image_size: Expected input resolution.
        depth_multiplier: Width multiplier for every layer; must be > 0.
        depth_divisible_by: Channel counts are rounded to a multiple of this.
        min_depth: Lower bound on any layer's channel count.
        expand_ratio: Expansion factor of the inverted-residual blocks.
        output_stride: Ratio of input to output spatial resolution.
        first_layer_is_expansion: Whether the first block starts with an
            expansion convolution.
        finegrained_output: Keep the full-width final layer even when
            depth_multiplier < 1.
        hidden_act: Activation function name (e.g. "relu6").
        tf_padding: Use TensorFlow-style "SAME" padding.
        classifier_dropout_prob: Dropout before the classification head.
        initializer_range: Stddev of the weight initializer.
        layer_norm_eps: Epsilon used by the normalization layers.
        semantic_loss_ignore_index: Label ignored by the segmentation loss.

    Raises:
        ValueError: If `depth_multiplier` is not strictly positive.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported graph.
        return 1e-4
629
0
from collections.abc import Sequence def _A ( lowerCamelCase , lowerCamelCase ): return sum(c * (x**i) for i, c in enumerate(UpperCamelCase__ ) ) def _A ( lowerCamelCase , lowerCamelCase ): a__ : int = 0.0 for coeff in reversed(UpperCamelCase__ ): a__ : Tuple = result * x + coeff return result if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = (0.0, 0.0, 5.0, 9.3, 7.0) SCREAMING_SNAKE_CASE__ : List[str] = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
720
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def _A ( lowerCamelCase ): a__ : List[str] = [] if isinstance(lowerCamelCase , lowerCamelCase ): for v in tree.values(): shapes.extend(_fetch_dims(lowerCamelCase ) ) elif isinstance(lowerCamelCase , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(lowerCamelCase ) ) elif isinstance(lowerCamelCase , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase ): a__ : List[str] = [] for d in reversed(lowerCamelCase ): idx.append(flat_idx % d ) a__ : Union[str, Any] = flat_idx // d return tuple(reversed(lowerCamelCase ) ) @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ): # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(lowerCamelCase ) -> None: a__ : int = True for i in range(len(lowerCamelCase ) ): a__ : Optional[Any] = -1 * (i + 1) l[reversed_idx] &= tally a__ : Tuple = l[reversed_idx] if start_edges is None: a__ : Optional[int] = [s == 0 for s in start] reduce_edge_list(lowerCamelCase ) if end_edges is None: a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )] reduce_edge_list(lowerCamelCase ) # Base cases. 
Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(lowerCamelCase ) == 0: return [()] elif len(lowerCamelCase ) == 1: return [(slice(start[0] , end[0] + 1 ),)] a__ : List[Tuple[slice, ...]] = [] a__ : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(lowerCamelCase , lowerCamelCase ): if s == e: path_list.append(slice(lowerCamelCase , s + 1 ) ) else: break a__ : Tuple[slice, ...] = tuple(lowerCamelCase ) a__ : Optional[Any] = len(lowerCamelCase ) # start == end, and we're done if divergence_idx == len(lowerCamelCase ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None a__ : Optional[Any] = start[divergence_idx] return tuple( path + (slice(lowerCamelCase , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None a__ : List[str] = end[divergence_idx] return tuple( path + (slice(lowerCamelCase , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: 
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) a__ : Optional[int] = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : Optional[int] = t.shape[:no_batch_dims] a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) ) # _get_minimal_slice_set is inclusive a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) ) # Get an ordered list of slices to perform a__ : str = _get_minimal_slice_set( lowerCamelCase , lowerCamelCase , lowerCamelCase , ) a__ : Any = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ): if not (len(lowerCamelCase ) > 0): raise ValueError("Must provide at least one input" ) a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )] a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] ) def _prep_inputs(lowerCamelCase ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: a__ : Dict = t.expand(orig_batch_dims + 
t.shape[no_batch_dims:] ) return t a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase ) a__ : str = None if _out is not None: a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) a__ : Optional[Any] = 1 for d in orig_batch_dims: flat_batch_dim *= d a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(lowerCamelCase ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t a__ : str = 0 a__ : Any = prepped_outputs for _ in range(lowerCamelCase ): # Chunk the input if not low_mem: a__ : str = _select_chunk else: a__ : Tuple = partial( _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , ) a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase ) # Run the layer on the chunk a__ : Any = layer(**lowerCamelCase ) # Allocate space for the output if out is None: a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase ) # Put the chunk in its pre-allocated space if isinstance(lowerCamelCase , lowerCamelCase ): def assign(lowerCamelCase , lowerCamelCase ) -> None: for k, v in da.items(): if isinstance(lowerCamelCase , lowerCamelCase ): assign(lowerCamelCase , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: a__ : Dict = da[k] assign(lowerCamelCase , lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): for xa, xa in zip(lowerCamelCase , lowerCamelCase ): if _add_into_out: xa[i : i + chunk_size] += xa else: a__ : Dict = xa elif isinstance(lowerCamelCase , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: a__ : Dict = output_chunk else: raise ValueError("Not supported" ) i += chunk_size a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase ) return out class 
__lowerCAmelCase : def __init__( self , snake_case = 512 , ) -> List[str]: """simple docstring""" a__ : int = max_chunk_size a__ : Optional[int] = None a__ : Optional[tuple] = None def _snake_case ( self , snake_case , snake_case , snake_case ) -> int: """simple docstring""" logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] a__ : List[str] = [c for c in candidates if c > min_chunk_size] a__ : Optional[int] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(snake_case ) -> bool: try: with torch.no_grad(): fn(*snake_case , chunk_size=snake_case ) return True except RuntimeError: return False a__ : Union[str, Any] = 0 a__ : Dict = len(snake_case ) - 1 while i > min_viable_chunk_size_index: a__ : Any = test_chunk_size(candidates[i] ) if not viable: a__ : List[Any] = (min_viable_chunk_size_index + i) // 2 else: a__ : Tuple = i a__ : Any = (i + len(snake_case ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _snake_case ( self , snake_case , snake_case ) -> bool: """simple docstring""" a__ : str = True for aa, aa in zip(snake_case , snake_case ): assert type(snake_case ) == type(snake_case ) if isinstance(snake_case , (list, tuple) ): consistent &= self._compare_arg_caches(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )] a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )] consistent &= self._compare_arg_caches(snake_case , snake_case ) else: consistent &= aa == aa return consistent def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int: """simple docstring""" a__ : List[Any] = True a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case ) if self.cached_arg_data is not 
None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(snake_case ) a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case ) else: # Otherwise, we can reuse the precomputed value a__ : Optional[int] = False if not consistent: a__ : List[str] = self._determine_favorable_chunk_size( snake_case , snake_case , snake_case , ) a__ : List[str] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
629
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Symbols importable without optional backends.
# Fix: this dict was assigned to a throwaway name while `_LazyModule` below
# referenced the undefined `_import_structure`.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

# PyTorch modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
721
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration for a UperNet semantic-segmentation model.

    Fixes over the mangled original: the ``__init__`` signature repeated the
    same parameter name (a SyntaxError) — parameter names are restored from the
    attribute assignments; ``model_type`` is restored (``to_dict`` reads
    ``self.__class__.model_type``); the logger/base-class names are rebound.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # kept mutable to preserve the original signature
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            # Default backbone when none is supplied.
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a plain dict into the matching config class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias for the obfuscated original class name.
__lowerCAmelCase = UperNetConfig
629
0
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos):
    """Coerce a single image, a list of frames, or a batch of videos into a
    list of videos (each video itself a list of frames).

    Name restored: the function was defined as ``_A`` but called as
    ``make_batched`` inside ``preprocess``.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos  # already a batch of videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]  # a single video: wrap once
    elif is_valid_image(videos):
        return [[videos]]  # a single frame: wrap twice
    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    """Video image processor: per frame resize, center-crop, rescale (with an
    optional zero-centering offset) and normalize.

    Parameter names were reconstructed from the attribute assignments — the
    mangled original repeated ``snake_case`` for every parameter (SyntaxError).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        offset=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        """Resize a frame: ``shortest_edge`` keeps aspect ratio, ``height``/``width`` is exact."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """Center-crop a frame to ``size['height']`` x ``size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        """Rescale pixel values by ``scale``; with ``offset`` the values are
        shifted first so the output is zero-centered.

        NOTE(review): the ``scale / 2`` shift is reproduced from the original
        line ``image - (scale / 2)`` — confirm the offset constant against the
        upstream implementation.
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize a frame with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single frame."""
        # Fix: parenthesized the resize check — the original
        # `do_resize and size is None or resample is None` raised even when
        # do_resize was False but resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more videos into model-ready ``pixel_values``."""
        # Fall back to the instance-level defaults for any unset option.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)


# Backward-compatible alias for the obfuscated original class name.
__lowerCAmelCase = VivitImageProcessor
700
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


# Pillow renamed its resampling constants in 9.1.0; pick the right spelling.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a batch of torch image tensors in [-1, 1] (NCHW) to PIL images.

    Name restored: both functions were defined as ``_A`` although this one
    calls ``numpy_to_pil``, so the second definition shadowed the first.
    """
    images = (images / 2 + 0.5).clamp(0, 1)  # map [-1, 1] -> [0, 1]
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()  # NCHW -> NHWC
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC, float values in [0, 1]) to PIL images."""
    if images.ndim == 3:
        # Single image: promote to a batch of one.
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # Special case for grayscale (single channel) images.
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
629
0
import argparse
from typing import Dict

# Heavy dependencies (tensorflow, torch, transformers, tqdm) are imported
# lazily inside the functions that need them so the pattern constants and
# `rename_state_dict_key` stay importable without them.

# tf -> hf renaming patterns shared by encoder and decoder weights.
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# TF variables with these suffixes have no PyTorch counterpart.
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    """Apply each (tf_name, hf_name) substitution in order and return the result."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict):
    """Map a dict of TF weight arrays onto a BigBirdPegasus PyTorch model."""
    import torch
    from tqdm import tqdm
    from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration

    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # Decoder weights use a different renaming table than the rest.
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        if any(k.endswith(ending) for ending in KEYS_TO_IGNORE):
            continue
        new_k = rename_state_dict_key(k, DECODER_PATTERNS)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T  # TF dense kernels are transposed relative to torch Linear
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        if any(k.endswith(ending) for ending in KEYS_TO_IGNORE):
            continue
        new_k = rename_state_dict_key(k, REMAINING_PATTERNS)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # Positions are shared between encoder and decoder in the torch model.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    """Load every variable of a TF checkpoint (except ignored ones) into a dict."""
    import tensorflow as tf
    from tqdm import tqdm

    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        if any(pat in name for pat in ignore_name):
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update):
    """End-to-end conversion: TF checkpoint -> PyTorch model saved to `save_dir`.

    Name restored: defined as `_A` but invoked by this name in `__main__`.
    """
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
701
# Lint as: python3
"""Utilities to derive dataset file names from dataset/split names.

Function and constant names restored: the dump defined six functions all
named `_A` (each shadowing the previous) while their bodies called
`camelcase_to_snakecase` / `filename_prefix_for_name` /
`filename_prefix_for_split` and the underscore-prefixed regex constants.
"""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert CamelCase to snake_case (e.g. ``SquadV2`` -> ``squad_v2``)."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake_case to CamelCase, preserving doubled underscores."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    """Snake-cased file prefix for a dataset name; rejects path-like input."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """File prefix ``<dataset>-<split>``; validates both components."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Glob pattern matching all shard files of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Concrete shard filenames for a dataset split (one per shard, or a single file)."""
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
629
0
def rank_of_matrix(matrix):
    """Compute the rank of a matrix by Gaussian elimination.

    The input is mutated in place. Fixes over the mangled original: the row
    swap had degraded into a one-sided assignment (so rows were never actually
    swapped), the column-elimination after a rank reduction assigned into a
    throwaway variable, and `row -= 1` inside a `for` loop never re-processed
    the current row — a `while` loop is used so it does.

    >>> rank_of_matrix([[1.0, 0.0], [0.0, 1.0]])
    2
    >>> rank_of_matrix([[1.0, 2.0], [2.0, 4.0]])
    1
    """
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero element below to swap rows with
            shrink_rank = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    shrink_rank = False
                    break
            if shrink_rank:
                # Column is dependent: drop it by copying the last counted column in.
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row and retry with the new pivot/column.
            continue
        row += 1
    return rank


# Backward-compatible alias for the obfuscated original name.
_A = rank_of_matrix

if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Fix: this dict was assigned to a throwaway name while `_LazyModule` below
# referenced the undefined `_import_structure`.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

# PyTorch modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
629
0
# Demo adjacency list used by the `__main__` prints below.
# Name restored: the dict was bound to a throwaway name while the prints
# referenced `demo_graph`.
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph, start, goal):
    """Return one shortest path from `start` to `goal` as a list of nodes,
    or [] if no path exists.

    Name restored: both functions were defined as `_A` (the second shadowed
    the first) while `__main__` called them by these names.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph, start, target):
    """Return the number of edges on a shortest path from `start` to `target`,
    0 if they are equal, or -1 if no path exists / inputs are invalid."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
703
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Fix: this dict was assigned to a throwaway name while `_LazyModule` below
# referenced the undefined `_import_structure`.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

# PyTorch modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
629
0
from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class ModelArguments:
    """CLI arguments for assembling a Flax vision-encoder-decoder model.

    Field names restored from the `model_args.*` accesses in `main` — the
    dump repeated the same field name for every entry (a duplicate-definition
    bug that left `main` reading undefined attributes).
    """

    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    """Build the encoder-decoder model from the two pretrained halves and save it.

    Name restored: defined as `_A` but invoked as `main()` under `__main__`.
    """
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    # NOTE(review): the original assignment target was lost in the dump; the
    # converted token is assumed to become the tokenizer's pad token — confirm.
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
704
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


# Passage the question-answering tool is queried against in every test.
# Name restored: the methods below referenced the constant by name while it
# was bound to a throwaway identifier.
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    """Exercise the text-question-answering tool locally and remotely.

    NOTE(review): method names were mangled to duplicates in the dump and are
    reconstructed here per unittest conventions — confirm against upstream.
    """

    def setUp(self):
        # Local tool plus a remote-endpoint variant (was `remote=snake_case`,
        # an undefined name; restored to True).
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
629
0
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : int = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ): _UpperCamelCase : Optional[int] = AlbertTokenizer _UpperCamelCase : Dict = AlbertTokenizerFast _UpperCamelCase : Tuple = True _UpperCamelCase : Optional[int] = True _UpperCamelCase : List[Any] = True def _snake_case ( self ) -> str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing a__ : List[Any] = AlbertTokenizer(snake_case ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , snake_case ) -> Union[str, Any]: """simple docstring""" a__ : Dict = "this is a test" a__ : Union[str, Any] = "this is a test" return input_text, output_text def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Dict = "<pad>" a__ : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "▁eloquent" ) self.assertEqual(len(snake_case ) , 30_000 ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def _snake_case ( self ) -> Dict: """simple docstring""" if not self.test_rust_tokenizer: return a__ : int = self.get_tokenizer() a__ : int = self.get_rust_tokenizer() a__ : int = "I was born in 92000, and this is falsé." 
a__ : Optional[int] = tokenizer.tokenize(snake_case ) a__ : Tuple = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) a__ : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case ) a__ : Tuple = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) a__ : List[str] = self.get_rust_tokenizer() a__ : Dict = tokenizer.encode(snake_case ) a__ : Union[str, Any] = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Union[str, Any] = AlbertTokenizer(snake_case , keep_accents=snake_case ) a__ : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case , ["▁this", "▁is", "▁a", "▁test"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [48, 25, 21, 1_289] ) a__ : int = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( snake_case , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] ) a__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case ) self.assertListEqual(snake_case , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) a__ : Dict = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual( snake_case , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ : Tuple = AlbertTokenizer(snake_case ) a__ : Optional[int] = tokenizer.encode("sequence builders" ) a__ : Union[str, Any] = tokenizer.encode("multi-sequence build" ) a__ : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case ) a__ : str = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + 
text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Tuple = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
705
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowerCAmelCase ( _UpperCamelCase ): @require_torch def _snake_case ( self ) -> str: """simple docstring""" a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a__ : Tuple = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(snake_case ) BertModel.from_pretrained(snake_case ) BertTokenizer.from_pretrained(snake_case ) pipeline(task="fill-mask" , model=snake_case ) # baseline - just load from_pretrained with normal network a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a__ : Dict = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Tuple = "1" a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n 
" a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a__ : List[Any] = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(snake_case ) BertModel.from_pretrained(snake_case ) BertTokenizer.from_pretrained(snake_case ) pipeline(task="fill-mask" , model=snake_case ) # baseline - just load from_pretrained with normal network a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a__ : Any = self.get_env() a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n " a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n " a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " # baseline - just load from_pretrained with normal network a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )] # should succeed a__ : str = self.get_env() a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # next emulate no network a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. 
# env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Union[str, Any] = "1" a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Optional[Any] = "\nfrom transformers import pipeline\n " a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n " a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " a__ : List[str] = self.get_env() a__ : Union[str, Any] = "1" a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )] a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , ) @require_torch def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Any = "\nfrom transformers import AutoModel\n " a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n " # baseline - just load from_pretrained with normal network a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )] # should succeed a__ : Optional[Any] = self.get_env() a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # should 
succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Dict = "1" a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() )
629
0
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE__ = 2_5_6 class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : str = ["""melgan"""] def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> None: """simple docstring""" super().__init__() # From MELGAN a__ : Dict = math.log(1E-5 ) # Matches MelGAN training. a__ : Any = 4.0 # Largest value for most examples a__ : int = 128 self.register_modules( notes_encoder=snake_case , continuous_encoder=snake_case , decoder=snake_case , scheduler=snake_case , melgan=snake_case , ) def _snake_case ( self , snake_case , snake_case=(-1.0, 1.0) , snake_case=False ) -> int: """simple docstring""" a__ : Any = output_range if clip: a__ : Tuple = torch.clip(snake_case , self.min_value , self.max_value ) # Scale to [0, 1]. a__ : Optional[Any] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def _snake_case ( self , snake_case , snake_case=(-1.0, 1.0) , snake_case=False ) -> Optional[int]: """simple docstring""" a__ : str = input_range a__ : Tuple = torch.clip(snake_case , snake_case , snake_case ) if clip else outputs # Scale to [0, 1]. a__ : str = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def _snake_case ( self , snake_case , snake_case , snake_case ) -> str: """simple docstring""" a__ : str = input_tokens > 0 a__ : Dict = self.notes_encoder( encoder_input_tokens=snake_case , encoder_inputs_mask=snake_case ) a__ : int = self.continuous_encoder( encoder_inputs=snake_case , encoder_inputs_mask=snake_case ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def _snake_case ( self , snake_case , snake_case , snake_case ) -> Union[str, Any]: """simple docstring""" a__ : Union[str, Any] = noise_time if not torch.is_tensor(snake_case ): a__ : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(snake_case ) and len(timesteps.shape ) == 0: a__ : str = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML a__ : Tuple = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) a__ : List[Any] = self.decoder( encodings_and_masks=snake_case , decoder_input_tokens=snake_case , decoder_noise_time=snake_case ) return logits @torch.no_grad() def __call__( self , snake_case , snake_case = None , snake_case = 100 , snake_case = True , snake_case = "numpy" , snake_case = None , snake_case = 1 , ) -> Union[AudioPipelineOutput, Tuple]: """simple docstring""" if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(snake_case )}.""" ) a__ : Tuple = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) a__ : int = np.zeros([1, 0, self.n_dims] , np.floataa ) a__ : List[Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=snake_case , device=self.device ) for i, encoder_input_tokens in enumerate(snake_case ): if i == 0: a__ 
: Dict = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. a__ : int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=snake_case , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. a__ : str = ones a__ : Tuple = self.scale_features( snake_case , output_range=[-1.0, 1.0] , clip=snake_case ) a__ : Dict = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=snake_case , continuous_mask=snake_case , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop a__ : Dict = randn_tensor( shape=encoder_continuous_inputs.shape , generator=snake_case , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(snake_case ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): a__ : str = self.decode( encodings_and_masks=snake_case , input_tokens=snake_case , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 a__ : Any = self.scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample a__ : Optional[int] = self.scale_to_features(snake_case , input_range=[-1.0, 1.0] ) a__ : Optional[int] = mel[:1] a__ : Union[str, Any] = mel.cpu().float().numpy() a__ : Dict = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case , snake_case ) logger.info("Generated segment" , snake_case ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. 
Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." ) if output_type == "numpy": a__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: a__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=snake_case )
706
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""", """VanForImageClassification""", """VanModel""", """VanPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
629
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Mapping of submodule name -> public names, consumed by `_LazyModule` below.
# Optional-dependency sections are merged in only when the backend is available.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
from PIL import Image def _A ( lowerCamelCase , lowerCamelCase ): def brightness(lowerCamelCase ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("level must be between -255.0 (black) and 255.0 (white)" ) return img.point(lowerCamelCase ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
629
0
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial divisor of ``num`` using Pollard's rho algorithm.

    The returned divisor is not necessarily prime. ``None`` is returned when
    no divisor was found within ``attempts`` retries (``num`` may be prime,
    or we were simply unlucky with the chosen pseudorandom function).

    Args:
        num: The value to factor; must be at least 2.
        seed: Initial position for both the tortoise and the hare.
        step: Additive constant of the pseudorandom map ``f(x) = (x**2 + step) % num``.
        attempts: Number of (seed, step) retries before giving up.

    Raises:
        ValueError: If ``num`` is less than 2.
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, the
    # algorithm struggles to find factors divisible by two, so even inputs
    # are answered directly. See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    def rand_fn(value: int, step: int, modulus: int) -> int:
        # Pollard suggested ``f(x) = (x**2 - 1) % num``; using a tunable
        # additive constant lets each retry use a different function.
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # Floyd cycle detection: the hare moves twice as fast as the tortoise.
        tortoise = seed
        hare = seed

        while True:
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Once both walkers are inside a cycle of length ``p`` (a divisor
            # of ``num``), their difference shares a common factor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, keep searching.
                continue
            if divisor == num:
                # Trivial divisor: this attempt failed, retry with new params.
                break
            # Nontrivial factor found.
            return divisor

        # Deterministic retry: reuse the hare's position as the next seed
        # (as in Brent's variant) and perturb the random function.
        seed = hare
        step += 1

    # Exhausted all attempts; ``num`` may be prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
708
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration SCREAMING_SNAKE_CASE__ : List[str] = { """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""", """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""", """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""", """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""", """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""", """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""", """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""", """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""", """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""", """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""", } def _A ( lowerCamelCase ): a__ : Optional[int] = ["layers", "blocks"] for k in ignore_keys: state_dict.pop(lowerCamelCase , lowerCamelCase ) SCREAMING_SNAKE_CASE__ : List[str] = { """blocks""": """layers""", """mlp.0""": """fc1""", 
"""mlp.2""": """fc2""", """mlp_ln""": """final_layer_norm""", """.attn.query""": """.self_attn.q_proj""", """.attn.key""": """.self_attn.k_proj""", """.attn.value""": """.self_attn.v_proj""", """.attn_ln""": """.self_attn_layer_norm""", """.attn.out""": """.self_attn.out_proj""", """.cross_attn.query""": """.encoder_attn.q_proj""", """.cross_attn.key""": """.encoder_attn.k_proj""", """.cross_attn.value""": """.encoder_attn.v_proj""", """.cross_attn_ln""": """.encoder_attn_layer_norm""", """.cross_attn.out""": """.encoder_attn.out_proj""", """decoder.ln.""": """decoder.layer_norm.""", """encoder.ln.""": """encoder.layer_norm.""", """token_embedding""": """embed_tokens""", """encoder.positional_embedding""": """encoder.embed_positions.weight""", """decoder.positional_embedding""": """decoder.embed_positions.weight""", """ln_post""": """layer_norm""", } def _A ( lowerCamelCase ): a__ : Tuple = list(s_dict.keys() ) for key in keys: a__ : Optional[Any] = key for k, v in WHISPER_MAPPING.items(): if k in key: a__ : Optional[int] = new_key.replace(lowerCamelCase , lowerCamelCase ) print(F"""{key} -> {new_key}""" ) a__ : Dict = s_dict.pop(lowerCamelCase ) return s_dict def _A ( lowerCamelCase ): a__ , a__ : Any = emb.weight.shape a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) a__ : Optional[Any] = emb.weight.data return lin_layer def _A ( lowerCamelCase , lowerCamelCase ): os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) a__ : Optional[Any] = os.path.basename(lowerCamelCase ) a__ : List[Any] = url.split("/" )[-2] a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase ) if os.path.exists(lowerCamelCase ) and not os.path.isfile(lowerCamelCase ): raise RuntimeError(F"""{download_target} exists and is not a regular file""" ) if os.path.isfile(lowerCamelCase ): a__ : Any = open(lowerCamelCase , "rb" ).read() if hashlib.shaaaa(lowerCamelCase ).hexdigest() == expected_shaaaa: return model_bytes else: 
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" ) with urllib.request.urlopen(lowerCamelCase ) as source, open(lowerCamelCase , "wb" ) as output: with tqdm( total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=lowerCamelCase , unit_divisor=1024 ) as loop: while True: a__ : Optional[Any] = source.read(8192 ) if not buffer: break output.write(lowerCamelCase ) loop.update(len(lowerCamelCase ) ) a__ : Optional[int] = open(lowerCamelCase , "rb" ).read() if hashlib.shaaaa(lowerCamelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) return model_bytes def _A ( lowerCamelCase , lowerCamelCase ): if ".pt" not in checkpoint_path: a__ : str = _download(_MODELS[checkpoint_path] ) else: a__ : str = torch.load(lowerCamelCase , map_location="cpu" ) a__ : Dict = original_checkpoint["dims"] a__ : Optional[int] = original_checkpoint["model_state_dict"] a__ : Any = state_dict["decoder.token_embedding.weight"] remove_ignore_keys_(lowerCamelCase ) rename_keys(lowerCamelCase ) a__ : Optional[Any] = True a__ : Optional[Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0] a__ : Tuple = WhisperConfig( vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCamelCase , decoder_ffn_dim=lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , ) a__ : Optional[Any] = WhisperForConditionalGeneration(lowerCamelCase ) a__ , a__ : Tuple = model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) if len(lowerCamelCase ) > 0 and not set(lowerCamelCase ) <= { 
"encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F""" but all the following weights are missing {missing}""" ) if tie_embeds: a__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens ) else: a__ : str = proj_out_weights model.save_pretrained(lowerCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() # # Required parameters parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
629
0
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowerCAmelCase ( _UpperCamelCase ): @require_torch def _snake_case ( self ) -> str: """simple docstring""" a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a__ : Tuple = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(snake_case ) BertModel.from_pretrained(snake_case ) BertTokenizer.from_pretrained(snake_case ) pipeline(task="fill-mask" , model=snake_case ) # baseline - just load from_pretrained with normal network a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a__ : Dict = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Tuple = "1" a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> List[Any]: """simple docstring""" a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n 
" a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a__ : List[Any] = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(snake_case ) BertModel.from_pretrained(snake_case ) BertTokenizer.from_pretrained(snake_case ) pipeline(task="fill-mask" , model=snake_case ) # baseline - just load from_pretrained with normal network a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a__ : Any = self.get_env() a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n " a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n " a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " # baseline - just load from_pretrained with normal network a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )] # should succeed a__ : str = self.get_env() a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # next emulate no network a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. 
# env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Union[str, Any] = "1" a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : Optional[Any] = "\nfrom transformers import pipeline\n " a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n " a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " a__ : List[str] = self.get_env() a__ : Union[str, Any] = "1" a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )] a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , ) @require_torch def _snake_case ( self ) -> Union[str, Any]: """simple docstring""" a__ : Any = "\nfrom transformers import AutoModel\n " a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n " # baseline - just load from_pretrained with normal network a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )] # should succeed a__ : Optional[Any] = self.get_env() a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # should 
succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__ : Dict = "1" a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() )
709
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : Optional[Any] = """informer""" _UpperCamelCase : Any = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]: """simple docstring""" a__ : Optional[Any] = prediction_length a__ : Optional[int] = context_length or prediction_length a__ : Optional[int] = distribution_output a__ : str = loss a__ : Optional[Any] = input_size a__ : int = num_time_features a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] a__ : Optional[int] = scaling a__ : List[str] = num_dynamic_real_features a__ : Optional[int] = num_static_real_features a__ : Optional[int] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(snake_case ) != 
num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) a__ : List[Any] = cardinality else: a__ : Tuple = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(snake_case ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) a__ : Tuple = embedding_dimension else: a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] a__ : Optional[Any] = num_parallel_samples # Transformer architecture configuration a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features a__ : Union[str, Any] = d_model a__ : Any = encoder_attention_heads a__ : Optional[Any] = decoder_attention_heads a__ : int = encoder_ffn_dim a__ : List[Any] = decoder_ffn_dim a__ : List[str] = encoder_layers a__ : Any = decoder_layers a__ : List[str] = dropout a__ : int = attention_dropout a__ : List[Any] = activation_dropout a__ : Optional[int] = encoder_layerdrop a__ : Tuple = decoder_layerdrop a__ : Any = activation_function a__ : Tuple = init_std a__ : Optional[int] = use_cache # Informer a__ : Union[str, Any] = attention_type a__ : List[str] = sampling_factor a__ : Optional[int] = distil super().__init__(is_encoder_decoder=snake_case , **snake_case ) @property def _snake_case ( self ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
629
0
from typing import TYPE_CHECKING from ...utils import _LazyModule SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
710
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random""" SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random""" @require_torch class __lowerCAmelCase ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> List[Any]: """simple docstring""" return AutoConfig.from_pretrained(snake_case ) def _snake_case ( self ) -> Any: """simple docstring""" a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _snake_case ( self ) -> str: """simple docstring""" a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case ) def _snake_case ( self ) -> List[str]: """simple docstring""" a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _snake_case ( self ) -> int: """simple docstring""" with self.assertRaises(snake_case ): create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
629
0
import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger SCREAMING_SNAKE_CASE__ : Optional[Any] = get_logger(__name__) class __lowerCAmelCase ( enum.Enum ): _UpperCamelCase : List[Any] = """all_checks""" _UpperCamelCase : Tuple = """basic_checks""" _UpperCamelCase : int = """no_checks""" class __lowerCAmelCase ( _UpperCamelCase ): pass class __lowerCAmelCase ( _UpperCamelCase ): pass class __lowerCAmelCase ( _UpperCamelCase ): pass class __lowerCAmelCase ( _UpperCamelCase ): pass def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None ): if expected_checksums is None: logger.info("Unable to verify checksums." ) return if len(set(lowerCamelCase ) - set(lowerCamelCase ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(lowerCamelCase ) - set(lowerCamelCase ) ) ) if len(set(lowerCamelCase ) - set(lowerCamelCase ) ) > 0: raise UnexpectedDownloadedFile(str(set(lowerCamelCase ) - set(lowerCamelCase ) ) ) a__ : Any = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] a__ : str = " for " + verification_name if verification_name is not None else "" if len(lowerCamelCase ) > 0: raise NonMatchingChecksumError( F"""Checksums didn't match{for_verification_name}:\n""" F"""{bad_urls}\n""" "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" ) logger.info("All the checksums matched successfully" + for_verification_name ) class __lowerCAmelCase ( _UpperCamelCase ): pass class __lowerCAmelCase ( _UpperCamelCase ): pass class __lowerCAmelCase ( _UpperCamelCase ): pass class __lowerCAmelCase ( _UpperCamelCase ): pass def _A ( lowerCamelCase , lowerCamelCase ): if expected_splits is None: logger.info("Unable to verify splits sizes." 
) return if len(set(lowerCamelCase ) - set(lowerCamelCase ) ) > 0: raise ExpectedMoreSplits(str(set(lowerCamelCase ) - set(lowerCamelCase ) ) ) if len(set(lowerCamelCase ) - set(lowerCamelCase ) ) > 0: raise UnexpectedSplits(str(set(lowerCamelCase ) - set(lowerCamelCase ) ) ) a__ : List[Any] = [ {"expected": expected_splits[name], "recorded": recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(lowerCamelCase ) > 0: raise NonMatchingSplitsSizesError(str(lowerCamelCase ) ) logger.info("All the splits matched successfully." ) def _A ( lowerCamelCase , lowerCamelCase = True ): if record_checksum: a__ : Tuple = shaaaa() with open(lowerCamelCase , "rb" ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , b"" ): m.update(lowerCamelCase ) a__ : Optional[Any] = m.hexdigest() else: a__ : Optional[int] = None return {"num_bytes": os.path.getsize(lowerCamelCase ), "checksum": checksum} def _A ( lowerCamelCase ): if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
711
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1""" SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2""" SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3""" SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4""" class __lowerCAmelCase ( _UpperCamelCase ): def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any: """simple docstring""" super()._init_() a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case ) a__ : int = StableDiffusionPipeline( vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _snake_case ( self ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )} def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a__ : List[Any] = self.unet.config.attention_head_dim // 2 
self.unet.set_attention_slice(snake_case ) def _snake_case ( self ) -> Tuple: """simple docstring""" self.enable_attention_slicing(snake_case ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]: """simple docstring""" return 
self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" return self.pipea( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) @torch.no_grad() def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict: """simple docstring""" a__ : Any = "cuda" if torch.cuda.is_available() else "cpu" self.to(snake_case ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 a__ : Any = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , 
num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.2 a__ : List[Any] = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.3 a__ : Optional[Any] = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get first result from Stable Diffusion Checkpoint v1.4 a__ : Dict = self.textaimg_sda_a( prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
629
0
def _A ( lowerCamelCase ): a__ : Union[str, Any] = [], [] while len(lowerCamelCase ) > 1: a__ : List[str] = min(lowerCamelCase ), max(lowerCamelCase ) start.append(lowerCamelCase ) end.append(lowerCamelCase ) collection.remove(lowerCamelCase ) collection.remove(lowerCamelCase ) end.reverse() return start + collection + end if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Dict = input("""Enter numbers separated by a comma:\n""").strip() SCREAMING_SNAKE_CASE__ : Tuple = [int(item) for item in user_input.split(""",""")] print(*merge_sort(unsorted), sep=""",""")
712
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665 def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ): if fluid_density <= 0: raise ValueError("Impossible fluid density" ) if volume < 0: raise ValueError("Impossible Object volume" ) if gravity <= 0: raise ValueError("Impossible Gravity" ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
629
0
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" (Rust-backed) SqueezeBERT tokenizer using WordPiece.

    NOTE(review): the anonymized dump had duplicate parameter names and
    unbound locals; this restores the canonical structure expected by
    ``PreTrainedTokenizerFast``.
    """

    # Hooks read by the PreTrainedTokenizerFast base class.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options the caller asked for.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model input from sequence(s): ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0s for the first sequence (+specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary to *save_directory*; return the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
713
from __future__ import annotations

from random import random


class Node:
    """Treap node: a BST ordered by ``value`` that is also a heap on the
    random ``prior``ity, which keeps the tree balanced in expectation."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split *root* into two subtrees: (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root and its right subtree belong to the "greater" half.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # root and its left subtree belong to the "smaller-or-equal" half.
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two subtrees where every value in *left* <= every value in *right*,
    keeping the heap property on priorities."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert *value*: split around it, then merge the new node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove every node whose value equals *value*."""
    left, right = split(root, value - 1)
    _, right = split(right, value)  # drop the slice that equals `value`
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the values in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated command string: ``+N`` inserts N, ``-N`` erases N."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Tiny REPL around the treap operations; 'q' quits."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
629
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Tuple = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
714
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ): _UpperCamelCase : Optional[int] = StableUnCLIPPipeline _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _UpperCamelCase : Any = False def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Any = 32 a__ : int = embedder_hidden_size # prior components torch.manual_seed(0 ) a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) a__ : Optional[Any] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) a__ : int = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , ) torch.manual_seed(0 ) a__ : str = DDPMScheduler( 
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case ) a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) a__ : Union[str, Any] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) a__ : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , ) torch.manual_seed(0 ) a__ : Tuple = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , ) torch.manual_seed(0 ) a__ : Optional[int] = AutoencoderKL() a__ : Any = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def _snake_case ( self , snake_case , 
snake_case=0 ) -> Dict: """simple docstring""" if str(snake_case ).startswith("mps" ): a__ : Union[str, Any] = torch.manual_seed(snake_case ) else: a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case ) a__ : Any = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def _snake_case ( self ) -> List[str]: """simple docstring""" a__ : Dict = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=snake_case ) def _snake_case ( self ) -> int: """simple docstring""" a__ : int = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=snake_case ) @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def _snake_case ( self ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> Tuple: """simple docstring""" a__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 ) a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" ) a__ : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case , snake_case ) def _snake_case ( self ) -> Tuple: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a__ : str = 
StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) a__ : Union[str, Any] = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a__ : Union[str, Any] = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) a__ : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
629
0
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : Optional[int] = """""" _UpperCamelCase : str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _UpperCamelCase : str = None # compression type in fsspec. ex: "gzip" _UpperCamelCase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self , snake_case = "" , snake_case = None , snake_case = None , **snake_case ) -> List[str]: """simple docstring""" super().__init__(self , **snake_case ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : Any = fsspec.open( snake_case , mode="rb" , protocol=snake_case , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : List[Any] = os.path.basename(self.file.path.split("::" )[0] ) a__ : List[Any] = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." 
in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _snake_case ( cls , snake_case ) -> Optional[Any]: """simple docstring""" return super()._strip_protocol(snake_case ).lstrip("/" ) def _snake_case ( self ) -> Optional[int]: """simple docstring""" if self.dir_cache is None: a__ : str = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : Dict = {f["name"]: f} def _snake_case ( self , snake_case ) -> Optional[Any]: """simple docstring""" return self.file.open().read() def _snake_case ( self , snake_case , snake_case = "rb" , snake_case=None , snake_case=True , snake_case=None , **snake_case , ) -> Dict: """simple docstring""" a__ : Optional[int] = self._strip_protocol(snake_case ) if mode != "rb": raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" ) return self.file.open() class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : Optional[Any] = """bz2""" _UpperCamelCase : Tuple = """bz2""" _UpperCamelCase : Dict = """.bz2""" class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : Dict = """gzip""" _UpperCamelCase : int = """gzip""" _UpperCamelCase : Any = """.gz""" class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : int = """lz4""" _UpperCamelCase : Dict = """lz4""" _UpperCamelCase : str = """.lz4""" class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : int = """xz""" _UpperCamelCase : Tuple = """xz""" _UpperCamelCase : Optional[Any] = """.xz""" class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : str = """zstd""" _UpperCamelCase : int = """zstd""" _UpperCamelCase : Optional[Any] = """.zst""" def __init__( self , snake_case , snake_case = "rb" , snake_case = None , snake_case = None , snake_case = DEFAULT_BLOCK_SIZE , **snake_case , ) -> Optional[int]: """simple docstring""" super().__init__( fo=snake_case , mode=snake_case , target_protocol=snake_case , target_options=snake_case , block_size=snake_case , 
**snake_case , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Optional[Any] = self.file.__enter__ class __lowerCAmelCase : def __init__( self , snake_case ) -> str: """simple docstring""" a__ : List[str] = file_ def __enter__( self ) -> Any: """simple docstring""" self._file.__enter__() return self def __exit__( self , *snake_case , **snake_case ) -> Tuple: """simple docstring""" self._file.__exit__(*snake_case , **snake_case ) def __iter__( self ) -> Dict: """simple docstring""" return iter(self._file ) def _snake_case ( self ) -> Tuple: """simple docstring""" return next(self._file ) def __getattr__( self , snake_case ) -> Optional[int]: """simple docstring""" return getattr(self._file , snake_case ) def fixed_enter(*snake_case , **snake_case ): return WrappedFile(_enter(*snake_case , **snake_case ) ) a__ : Optional[Any] = fixed_enter
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : str = { """configuration_distilbert""": [ """DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DistilBertConfig""", """DistilBertOnnxConfig""", ], """tokenization_distilbert""": ["""DistilBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ """DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DistilBertForMaskedLM""", """DistilBertForMultipleChoice""", """DistilBertForQuestionAnswering""", """DistilBertForSequenceClassification""", """DistilBertForTokenClassification""", """DistilBertModel""", """DistilBertPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = [ """TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDistilBertForMaskedLM""", """TFDistilBertForMultipleChoice""", """TFDistilBertForQuestionAnswering""", """TFDistilBertForSequenceClassification""", """TFDistilBertForTokenClassification""", """TFDistilBertMainLayer""", """TFDistilBertModel""", """TFDistilBertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Any = [ """FlaxDistilBertForMaskedLM""", """FlaxDistilBertForMultipleChoice""", """FlaxDistilBertForQuestionAnswering""", """FlaxDistilBertForSequenceClassification""", """FlaxDistilBertForTokenClassification""", """FlaxDistilBertModel""", """FlaxDistilBertPreTrainedModel""", ] if 
TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
629
0
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    """2x nearest-neighbor upsample followed by a 3x3 conv.

    Assumes NHWC layout (Flax convention) — TODO confirm against callers.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    """2x spatial downsample via a stride-2 3x3 conv (NHWC)."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    """ResNet block with timestep-embedding injection between the two convs.

    The anonymized dump assigned the submodules to throwaway locals instead of
    the `self.*` attributes that `__call__` reads; this restores the bindings.
    """

    in_channels: int
    out_channels: int = None  # defaults to in_channels when None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None  # auto: shortcut conv iff channel count changes
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # Projects the (swished) timestep embedding to the conv channel count.
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 conv so the residual matches the new channel count.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)  # broadcast over H, W
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
716
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by uniformly sampling the square [-1, 1]^2 and counting the
    fraction of points inside the unit circle; prints the estimate and error."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""")
    print(f"""The numpy value of pi is {pi}""")
    print(f"""The total error is {abs(pi - pi_estimate)}""")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of *function_to_integrate* over
    [min_value, max_value]: mean of sampled values times the interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator against y = x, whose integral is known in closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {expected_value}""")
    print(f"""Total error is {abs(estimated_value - expected_value)}""")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi via the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {pi}""")
    print(f"""Total error is {abs(estimated_value - pi)}""")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
629
0
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) @add_end_docstrings( _UpperCamelCase ,r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ ,) class __lowerCAmelCase ( _UpperCamelCase ): def _snake_case ( self , snake_case ) -> np.ndarray: """simple docstring""" if self.framework == "tf": a__ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": a__ : Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case ) else: raise ValueError("Unsupported framework" ) return masked_index def _snake_case ( self , snake_case ) -> np.ndarray: """simple docstring""" a__ : Dict = self.get_masked_index(snake_case ) a__ : Tuple = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def _snake_case ( self , snake_case ) -> int: """simple docstring""" if isinstance(snake_case , snake_case ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(snake_case ) def _snake_case ( self , snake_case , snake_case=None , **snake_case ) -> Dict[str, GenericTensor]: 
"""simple docstring""" if return_tensors is None: a__ : List[str] = self.framework a__ : Tuple = self.tokenizer(snake_case , return_tensors=snake_case ) self.ensure_exactly_one_mask_token(snake_case ) return model_inputs def _snake_case ( self , snake_case ) -> List[str]: """simple docstring""" a__ : List[str] = self.model(**snake_case ) a__ : Dict = model_inputs["input_ids"] return model_outputs def _snake_case ( self , snake_case , snake_case=5 , snake_case=None ) -> List[Any]: """simple docstring""" if target_ids is not None and target_ids.shape[0] < top_k: a__ : Any = target_ids.shape[0] a__ : Tuple = model_outputs["input_ids"][0] a__ : Tuple = model_outputs["logits"] if self.framework == "tf": a__ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] a__ : Dict = outputs.numpy() a__ : str = outputs[0, masked_index, :] a__ : List[Any] = stable_softmax(snake_case , axis=-1 ) if target_ids is not None: a__ : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case , 0 ) , target_ids.reshape(-1 , 1 ) ) a__ : List[Any] = tf.expand_dims(snake_case , 0 ) a__ : Optional[int] = tf.math.top_k(snake_case , k=snake_case ) a__ : Tuple = topk.values.numpy(), topk.indices.numpy() else: a__ : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample a__ : Tuple = outputs[0, masked_index, :] a__ : Optional[int] = logits.softmax(dim=-1 ) if target_ids is not None: a__ : Tuple = probs[..., target_ids] a__ : Tuple = probs.topk(snake_case ) a__ : Optional[int] = [] a__ : Union[str, Any] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): a__ : Union[str, Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place a__ : Union[str, Any] = input_ids.numpy().copy() if target_ids is not None: a__ : List[str] = 
target_ids[p].tolist() a__ : Dict = p # Filter padding out: a__ : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back a__ : Union[str, Any] = self.tokenizer.decode(snake_case , skip_special_tokens=snake_case ) a__ : List[Any] = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(snake_case ) result.append(snake_case ) if single_mask: return result[0] return result def _snake_case ( self , snake_case , snake_case=None ) -> Dict: """simple docstring""" if isinstance(snake_case , snake_case ): a__ : Optional[int] = [targets] try: a__ : Dict = self.tokenizer.get_vocab() except Exception: a__ : Dict = {} a__ : List[Any] = [] for target in targets: a__ : List[str] = vocab.get(snake_case , snake_case ) if id_ is None: a__ : List[Any] = self.tokenizer( snake_case , add_special_tokens=snake_case , return_attention_mask=snake_case , return_token_type_ids=snake_case , max_length=1 , truncation=snake_case , )["input_ids"] if len(snake_case ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ "We cannot replace it with anything meaningful, ignoring it" ) continue a__ : List[Any] = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) a__ : Tuple = list(set(snake_case ) ) if len(snake_case ) == 0: raise ValueError("At least one target must be provided when passed." 
) a__ : Dict = np.array(snake_case ) return target_ids def _snake_case ( self , snake_case=None , snake_case=None ) -> List[str]: """simple docstring""" a__ : Optional[int] = {} if targets is not None: a__ : Tuple = self.get_target_ids(snake_case , snake_case ) a__ : Optional[int] = target_ids if top_k is not None: a__ : Dict = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__( self , snake_case , *snake_case , **snake_case ) -> Optional[Any]: """simple docstring""" a__ : List[Any] = super().__call__(snake_case , **snake_case ) if isinstance(snake_case , snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs
717
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    """Map one key of the original EfficientFormer state dict to its HF name.

    Args:
        old_name: key from the original checkpoint.
        num_meta4D_last_stage: number of meta4D blocks in the final stage; keys
            with a block index at or beyond this count belong to the meta3D
            (attention) sub-stage.

    Returns:
        The renamed key, prefixed with ``efficientformer.`` / ``efficientformer.encoder.``.
    """
    new_name = old_name

    # Stem: patch_embed.{0,1,3,4}.* -> conv/batchnorm pairs.
    if "patch_embed" in old_name:
        _, layer, _param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Block index may be one or two digits (e.g. "network.3.12.…").
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            # Intermediate stages: keep the stage index, move block index under meta4D_layers.
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            # Last stage: split between meta4D blocks and trailing meta3D (attention) blocks.
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                # Re-index meta3D blocks so they start from 0.
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                # meta3D blocks use LayerNorm / Linear naming instead of batchnorm / conv.
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    # Remaining meta4D naming: fc -> conv, norm -> batchnorm (but keep layernorms).
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of `checkpoint` in place and return it."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    """Download the standard COCO test image used to sanity-check the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: str, efficientformer_config_file: str, pytorch_dump_path: str, push_to_hub: bool
):
    """Convert an original EfficientFormer checkpoint to the HF format.

    Loads the original weights, renames them, verifies the logits on a test
    image against hard-coded reference values, saves model + processor to
    `pytorch_dump_path`, and optionally pushes both to the Hub.

    Raises:
        ValueError: if the checkpoint name is not an l1/l3/l7 variant.
        AssertionError: if preprocessing or logits do not match the reference.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # Blocks in the last stage beyond this index are meta3D (attention) blocks.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline — must produce identical tensors to the HF processor
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfuly saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    # Pushing is enabled by default; --no-push_to_hub opts out.
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
629
0
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wrap language-modeling token sequences for distillation training.

    Stores token-id sequences and their lengths as numpy arrays, then cleans
    the data on construction: splits over-long sequences, drops very short
    ones, and drops sequences dominated by unknown tokens.
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that stored lengths match the stored sequences."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than `max_model_input_size` into chunks.

        Each chunk is re-wrapped with the cls/sep (or bos/eos) special tokens
        so every resulting sequence is a valid model input.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(seq, n):
            # Slice `seq` into consecutive chunks of at most `n` items.
            return [seq[i : i + n] for i in range(0, len(seq), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # Reserve 2 positions per chunk for the re-added special tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer (too short to be useful)."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences where >=50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate a list of (token_ids, length) pairs into padded tensors.

        Returns:
            tk_t: LongTensor of shape (bs, max_seq_len_) with right padding.
            lg_t: LongTensor of shape (bs,) with the original lengths.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids; MLM setups have a dedicated pad token, CLM reuses unk.
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
718
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LXMERT tokenizer, BERT-style WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the requested lowercase/accents/chinese-chars settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from one/two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` (if present)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
629
0
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    # Minimal handler with one field of each relevant type, used to exercise
    # KwargsHandler.to_kwargs (which should only report non-default values).
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Verify GradScalerKwargs values reach the underlying torch GradScaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this very file under torchrun; the __main__ block below
        # performs the actual DDP kwargs assertions in each process.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
719
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    """Configuration for a MobileNetV2 model.

    All arguments default to the values of the google/mobilenet_v2_1.0_224
    architecture.

    Raises:
        ValueError: if `depth_multiplier` is not strictly positive.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic for image inputs.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported vs. eager outputs.
        return 1e-4
629
0
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    """Configuration for a LayoutLMv3 model.

    Combines standard BERT-style text parameters (forwarded to the base
    config) with layout (2D position) and visual (patch) parameters stored
    on this class.
    """

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Layout (2D position) parameters.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        # Text / visual embedding parameters.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    """ONNX export configuration for LayoutLMv3."""

    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The expected input order differs between QA/sequence-classification
        # heads and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported vs. eager outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Generate dummy text/bbox/image inputs for tracing the export.

        Returns a dict of model inputs produced by `processor`.
        """
        # OCR must be disabled since we supply words and boxes ourselves.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
720
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def _A ( lowerCamelCase ): a__ : List[str] = [] if isinstance(lowerCamelCase , lowerCamelCase ): for v in tree.values(): shapes.extend(_fetch_dims(lowerCamelCase ) ) elif isinstance(lowerCamelCase , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(lowerCamelCase ) ) elif isinstance(lowerCamelCase , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase ): a__ : List[str] = [] for d in reversed(lowerCamelCase ): idx.append(flat_idx % d ) a__ : Union[str, Any] = flat_idx // d return tuple(reversed(lowerCamelCase ) ) @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ): # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(lowerCamelCase ) -> None: a__ : int = True for i in range(len(lowerCamelCase ) ): a__ : Optional[Any] = -1 * (i + 1) l[reversed_idx] &= tally a__ : Tuple = l[reversed_idx] if start_edges is None: a__ : Optional[int] = [s == 0 for s in start] reduce_edge_list(lowerCamelCase ) if end_edges is None: a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )] reduce_edge_list(lowerCamelCase ) # Base cases. 
Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(lowerCamelCase ) == 0: return [()] elif len(lowerCamelCase ) == 1: return [(slice(start[0] , end[0] + 1 ),)] a__ : List[Tuple[slice, ...]] = [] a__ : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(lowerCamelCase , lowerCamelCase ): if s == e: path_list.append(slice(lowerCamelCase , s + 1 ) ) else: break a__ : Tuple[slice, ...] = tuple(lowerCamelCase ) a__ : Optional[Any] = len(lowerCamelCase ) # start == end, and we're done if divergence_idx == len(lowerCamelCase ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None a__ : Optional[Any] = start[divergence_idx] return tuple( path + (slice(lowerCamelCase , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None a__ : List[str] = end[divergence_idx] return tuple( path + (slice(lowerCamelCase , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: 
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) a__ : Optional[int] = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : Optional[int] = t.shape[:no_batch_dims] a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) ) # _get_minimal_slice_set is inclusive a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) ) # Get an ordered list of slices to perform a__ : str = _get_minimal_slice_set( lowerCamelCase , lowerCamelCase , lowerCamelCase , ) a__ : Any = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ): if not (len(lowerCamelCase ) > 0): raise ValueError("Must provide at least one input" ) a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )] a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] ) def _prep_inputs(lowerCamelCase ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: a__ : Dict = t.expand(orig_batch_dims + 
t.shape[no_batch_dims:] ) return t a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase ) a__ : str = None if _out is not None: a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) a__ : Optional[Any] = 1 for d in orig_batch_dims: flat_batch_dim *= d a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(lowerCamelCase ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t a__ : str = 0 a__ : Any = prepped_outputs for _ in range(lowerCamelCase ): # Chunk the input if not low_mem: a__ : str = _select_chunk else: a__ : Tuple = partial( _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , ) a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase ) # Run the layer on the chunk a__ : Any = layer(**lowerCamelCase ) # Allocate space for the output if out is None: a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase ) # Put the chunk in its pre-allocated space if isinstance(lowerCamelCase , lowerCamelCase ): def assign(lowerCamelCase , lowerCamelCase ) -> None: for k, v in da.items(): if isinstance(lowerCamelCase , lowerCamelCase ): assign(lowerCamelCase , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: a__ : Dict = da[k] assign(lowerCamelCase , lowerCamelCase ) elif isinstance(lowerCamelCase , lowerCamelCase ): for xa, xa in zip(lowerCamelCase , lowerCamelCase ): if _add_into_out: xa[i : i + chunk_size] += xa else: a__ : Dict = xa elif isinstance(lowerCamelCase , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: a__ : Dict = output_chunk else: raise ValueError("Not supported" ) i += chunk_size a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase ) return out class 
__lowerCAmelCase : def __init__( self , snake_case = 512 , ) -> List[str]: """simple docstring""" a__ : int = max_chunk_size a__ : Optional[int] = None a__ : Optional[tuple] = None def _snake_case ( self , snake_case , snake_case , snake_case ) -> int: """simple docstring""" logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] a__ : List[str] = [c for c in candidates if c > min_chunk_size] a__ : Optional[int] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(snake_case ) -> bool: try: with torch.no_grad(): fn(*snake_case , chunk_size=snake_case ) return True except RuntimeError: return False a__ : Union[str, Any] = 0 a__ : Dict = len(snake_case ) - 1 while i > min_viable_chunk_size_index: a__ : Any = test_chunk_size(candidates[i] ) if not viable: a__ : List[Any] = (min_viable_chunk_size_index + i) // 2 else: a__ : Tuple = i a__ : Any = (i + len(snake_case ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _snake_case ( self , snake_case , snake_case ) -> bool: """simple docstring""" a__ : str = True for aa, aa in zip(snake_case , snake_case ): assert type(snake_case ) == type(snake_case ) if isinstance(snake_case , (list, tuple) ): consistent &= self._compare_arg_caches(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )] a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )] consistent &= self._compare_arg_caches(snake_case , snake_case ) else: consistent &= aa == aa return consistent def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int: """simple docstring""" a__ : List[Any] = True a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case ) if self.cached_arg_data is not 
None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(snake_case ) a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case ) else: # Otherwise, we can reuse the precomputed value a__ : Optional[int] = False if not consistent: a__ : List[str] = self._determine_favorable_chunk_size( snake_case , snake_case , snake_case , ) a__ : List[str] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
629
0
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : Tuple = TypeVar("""T""") class __lowerCAmelCase ( Generic[T] ): def __init__( self , snake_case ) -> Tuple: """simple docstring""" a__ : Tuple = data a__ : Node[T] | None = None def __str__( self ) -> str: """simple docstring""" return F"""{self.data}""" class __lowerCAmelCase ( Generic[T] ): def __init__( self ) -> None: """simple docstring""" a__ : Node[T] | None = None def __iter__( self ) -> Iterator[T]: """simple docstring""" a__ : Tuple = self.top while node: yield node.data a__ : Optional[int] = node.next def __str__( self ) -> str: """simple docstring""" return "->".join([str(snake_case ) for item in self] ) def __len__( self ) -> int: """simple docstring""" return len(tuple(iter(self ) ) ) def _snake_case ( self ) -> bool: """simple docstring""" return self.top is None def _snake_case ( self , snake_case ) -> None: """simple docstring""" a__ : Any = Node(snake_case ) if not self.is_empty(): a__ : Optional[Any] = self.top a__ : List[str] = node def _snake_case ( self ) -> T: """simple docstring""" if self.is_empty(): raise IndexError("pop from empty stack" ) assert isinstance(self.top , snake_case ) a__ : Optional[Any] = self.top a__ : Optional[Any] = self.top.next return pop_node.data def _snake_case ( self ) -> T: """simple docstring""" if self.is_empty(): raise IndexError("peek from empty stack" ) assert self.top is not None return self.top.data def _snake_case ( self ) -> None: """simple docstring""" a__ : Optional[int] = None if __name__ == "__main__": from doctest import testmod testmod()
721
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : int = """upernet""" def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]: """simple docstring""" super().__init__(**snake_case ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(snake_case , snake_case ): a__ : Optional[int] = backbone_config.get("model_type" ) a__ : str = CONFIG_MAPPING[backbone_model_type] a__ : str = config_class.from_dict(snake_case ) a__ : int = backbone_config a__ : Optional[Any] = hidden_size a__ : Optional[Any] = initializer_range a__ : Tuple = pool_scales a__ : Optional[Any] = use_auxiliary_head a__ : Optional[Any] = auxiliary_loss_weight a__ : Dict = auxiliary_in_channels a__ : Optional[int] = auxiliary_channels a__ : Any = auxiliary_num_convs a__ : Any = auxiliary_concat_input a__ : int = loss_ignore_index def _snake_case ( self ) -> str: """simple docstring""" a__ : Tuple = copy.deepcopy(self.__dict__ ) a__ : Optional[Any] = self.backbone_config.to_dict() a__ : List[Any] = self.__class__.model_type return output
629
0