Dataset schema (one record per row; ranges are the observed min/max across the split):

  code                     string  (length 87 to 55.2k)
  code_codestyle           int64   (0 to 349)
  style_context            string  (length 135 to 49.1k)
  style_context_codestyle  int64   (0 to 349)
  label                    int64   (0 to 1)
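For orientation, a minimal sketch of how one record of this dataset might be inspected with pandas, assuming the dump is available as a local Parquet file (the path `data/train-00000.parquet` is hypothetical, not taken from this document):

```python
import pandas as pd

# Hypothetical local path; substitute the actual file backing this dump.
df = pd.read_parquet("data/train-00000.parquet")

row = df.iloc[0]
# The integer columns carry the style ids and the binary label.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
# The string columns hold flattened Python source, as in the rows below.
print(row["code"][:200])  # first 200 characters of the code sample
```

The records below are shown with their fields labeled and the flattened source restored to readable Python.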
Row 1
code:
```python
import json
import os
import tempfile
from unittest.mock import patch

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment


def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)


class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_init_on_cuda(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerate_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should NOT have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        sgd = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(sgd)
```
code_codestyle: 300
style_context:
```python
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    # `func` must be an expression in the variable `x`, e.g. "sin(x)".
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of logarithmic function
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
```
style_context_codestyle: 300
label: 1
Row 2
code:
```python
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
```
code_codestyle: 300
style_context:
```python
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in
        case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       normalized=True,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of"
                " `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
```
style_context_codestyle: 300
label: 1
Row 3
code:
```python
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_ctx=2_048,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
```
code_codestyle: 300
style_context:
```python
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force comparison of all pairs
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # each point only needs to be compared with its 6 nearest neighbors in the strip
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
```
style_context_codestyle: 300
label: 1
Row 4
code:
```python
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and
    # callee. For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
```
code_codestyle: 300
style_context:
```python
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
```
style_context_codestyle: 300
label: 1
Row 5
code:
```python
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
```
code_codestyle: 300
style_context:
```python
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
```
style_context_codestyle: 300
label: 1
Row 6
code:
```python
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```
code_codestyle: 300
style_context (truncated in the source dump; the record ends mid-statement):
```python
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str,
```
help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
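# Hedged invocation sketch for the conversion script above (the script name and
# all paths are placeholders, not real checkpoints):
#   python convert_wav2vec2_seq2seq_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq_model.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2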
300
1
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation-count link text from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
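# Hedged note: the "gs_ri" and "gs_fl" class names are Google Scholar page
# markup and can change without notice; anchors[2] is assumed to be the
# "Cited by N" link in the result footer, so any layout change breaks the index.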
300
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : """simple docstring""" def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ): '''simple docstring''' A_ : str = parent A_ : str = batch_size A_ : str = seq_length A_ : Any = is_training A_ : Any = use_input_mask A_ : str = use_token_type_ids A_ : Tuple = use_labels A_ : Optional[Any] = vocab_size A_ : Dict = hidden_size A_ : str = num_hidden_layers A_ : Dict = num_attention_heads A_ : str = intermediate_size A_ : int = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : List[Any] = type_vocab_size A_ : Any = type_sequence_label_size A_ : Dict = initializer_range A_ : Any = num_labels A_ : Optional[int] = num_choices A_ : Optional[Any] = scope A_ : Any = range_bbox def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ : str = bbox[i, j, 3] A_ : Union[str, Any] = bbox[i, j, 1] A_ : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: A_ : Any = bbox[i, j, 2] A_ : Tuple = bbox[i, j, 0] A_ : int = t A_ : int = tf.convert_to_tensor(snake_case ) A_ : Any = None if self.use_input_mask: A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : str = None if self.use_token_type_ids: A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Dict = None A_ : List[Any] = None A_ : List[str] = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : str = ids_tensor([self.batch_size] , self.num_choices ) A_ : int = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ): '''simple docstring''' A_ : Any = TFLayoutLMModel(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) A_ : str = model(snake_case , snake_case , token_type_ids=snake_case ) A_ : List[Any] = model(snake_case , snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ): '''simple docstring''' A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Union[str, Any] = self.num_labels A_ : int = TFLayoutLMForSequenceClassification(config=snake_case ) A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.num_labels A_ : str = TFLayoutLMForTokenClassification(config=snake_case ) A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case ) A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Optional[Any] = { "input_ids": 
input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 10 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Tuple = TFLayoutLMModelTester(self ) A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def __snake_case ( ) -> Optional[Any]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 A_ : Union[str, Any] = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs() # forward pass A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the sequence output on [0, :3, :3] A_ : List[Any] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) ) # test the pooled output on [1, :3] A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs() # forward pass A_ : Dict = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar A_ : List[str] = outputs.loss A_ : Union[str, Any] = (2,) self.assertEqual(loss.shape , snake_case ) # test the shape of the logits A_ : Tuple = outputs.logits A_ : Tuple = (2, 2) self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) # test the shape of the logits A_ : Dict = outputs.logits A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) ) 
self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the shape of the logits A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , snake_case ) self.assertEqual(outputs.end_logits.shape , snake_case )
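# Hedged sketch: the @slow integration tests above only run when explicitly
# enabled; in the transformers repo that is done via an environment flag
# (test file path assumed from the usual repo layout):
#   RUN_SLOW=1 pytest tests/models/layoutlm/test_modeling_tf_layoutlm.py -k "Integration"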
300
1
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title ("A", "AB", ...) to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
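# Hedged usage sketch: column titles behave like bijective base-26 numerals
# ("A" -> 1, "Z" -> 26, "AA" -> 27):
assert excel_title_to_column("AB") == 28   # 1 * 26 + 2
assert excel_title_to_column("ZZ") == 702  # 26 * 26 + 26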
300
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and return the prompt template, or echo an inline prompt back."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
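# Hedged usage sketch: a string containing whitespace is treated as the prompt
# itself and returned verbatim, with no Hub download:
inline = download_prompt("Answer briefly: <<task>>", agent_name="demo-agent")
assert inline == "Answer briefly: <<task>>"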
300
1
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap the two elements if they violate the requested direction (1=asc, 0=desc)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge a bitonic sequence of `length` elements starting at `low` into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at `low`: build two opposed halves, then merge."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
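# Hedged usage sketch: bitonic sort assumes len(array) is a power of two;
# odd-length inputs are not guaranteed to come out fully ordered.
example = [3, 1, 4, 2]
bitonic_sort(example, 0, len(example), 1)  # direction 1 = ascending
assert example == [1, 2, 3, 4]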
300
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n  # encodes the stack state of the recursive formulation
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
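# Hedged usage sketch: Heap's algorithm emits each permutation exactly once,
# so the result holds n! distinct tuples:
perms = heaps([1, 2, 3])
assert len(perms) == 6 and len(set(perms)) == 6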
300
1
class __magic_name__ : """simple docstring""" def __init__( self :List[str] , snake_case :int , snake_case :Any=None , snake_case :Any=None ): '''simple docstring''' A_ : Dict = data A_ : List[str] = previous A_ : Tuple = next_node def __str__( self :Tuple ): '''simple docstring''' return f"{self.data}" def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' return self.data def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' return self.next def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' return self.previous class __magic_name__ : """simple docstring""" def __init__( self :str , snake_case :Optional[Any] ): '''simple docstring''' A_ : Optional[Any] = head def __iter__( self :int ): '''simple docstring''' return self def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' if not self.current: raise StopIteration else: A_ : Dict = self.current.get_data() A_ : Optional[int] = self.current.get_next() return value class __magic_name__ : """simple docstring""" def __init__( self :Union[str, Any] ): '''simple docstring''' A_ : str = None # First node in list A_ : Any = None # Last node in list def __str__( self :Dict ): '''simple docstring''' A_ : Any = self.head A_ : Optional[Any] = [] while current is not None: nodes.append(current.get_data() ) A_ : List[Any] = current.get_next() return " ".join(str(snake_case ) for node in nodes ) def __contains__( self :Tuple , snake_case :int ): '''simple docstring''' A_ : Dict = self.head while current: if current.get_data() == value: return True A_ : str = current.get_next() return False def __iter__( self :List[Any] ): '''simple docstring''' return LinkedListIterator(self.head ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' if self.head: return self.head.get_data() return None def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' if self.tail: return self.tail.get_data() return None def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Node ): '''simple docstring''' if self.head is None: A_ : int = node A_ : Tuple = node else: self.insert_before_node(self.head , snake_case ) def SCREAMING_SNAKE_CASE ( self :str , snake_case :Node ): '''simple docstring''' if self.head is None: self.set_head(snake_case ) else: self.insert_after_node(self.tail , snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :int ): '''simple docstring''' A_ : str = Node(snake_case ) if self.head is None: self.set_head(snake_case ) else: self.set_tail(snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :Node , snake_case :Node ): '''simple docstring''' A_ : Optional[int] = node A_ : Any = node.previous if node.get_previous() is None: A_ : Union[str, Any] = node_to_insert else: A_ : str = node_to_insert A_ : Optional[int] = node_to_insert def SCREAMING_SNAKE_CASE ( self :int , snake_case :Node , snake_case :Node ): '''simple docstring''' A_ : Optional[int] = node A_ : Any = node.next if node.get_next() is None: A_ : Union[str, Any] = node_to_insert else: A_ : Union[str, Any] = node_to_insert A_ : List[str] = node_to_insert def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :int , snake_case :int ): '''simple docstring''' A_ : str = 1 A_ : Union[str, Any] = Node(snake_case ) A_ : List[Any] = self.head while node: if current_position == position: self.insert_before_node(snake_case , snake_case ) return current_position += 1 A_ : int = node.next self.insert_after_node(self.tail , snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int ): '''simple 
docstring''' A_ : Dict = self.head while node: if node.get_data() == item: return node A_ : Tuple = node.get_next() raise Exception("Node not found" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Tuple ): '''simple docstring''' if (node := self.get_node(snake_case )) is not None: if node == self.head: A_ : Optional[Any] = self.head.get_next() if node == self.tail: A_ : int = self.tail.get_previous() self.remove_node_pointers(snake_case ) @staticmethod def SCREAMING_SNAKE_CASE ( snake_case :Node ): '''simple docstring''' if node.get_next(): A_ : Tuple = node.previous if node.get_previous(): A_ : Tuple = node.next A_ : str = None A_ : Union[str, Any] = None def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return self.head is None def __snake_case ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
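# Hedged usage sketch, assuming the obfuscated classes above deobfuscate to
# Node and LinkedList (the names their own method bodies call):
dll = LinkedList()
for value in (1, 2, 3):
    dll.set_tail(Node(value))  # set_head/set_tail appear as call sites above
assert str(dll) == "1 2 3" and 2 in dll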
300
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : List[Any] = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase : Any = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = RobertaTokenizer def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ): '''simple docstring''' super().__init__( snake_case , snake_case 
, tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , ) A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**snake_case ) A_ : Optional[int] = add_prefix_space A_ : Optional[int] = "post_processor" A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case ) if tokenizer_component_instance: A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : List[Any] = tuple(state["sep"] ) if "cls" in state: A_ : Optional[Any] = tuple(state["cls"] ) A_ : Tuple = False if state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : List[Any] = add_prefix_space A_ : Optional[int] = True if state.get("trim_offsets" , snake_case ) != trim_offsets: A_ : List[str] = trim_offsets A_ : Any = True if changes_to_apply: A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) ) A_ : Any = component_class(**snake_case ) setattr(self.backend_tokenizer , snake_case , snake_case ) @property def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Dict ): '''simple docstring''' A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value A_ : Any = value def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ): '''simple docstring''' A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ): '''simple docstring''' A_ : Any = [self.sep_token_id] A_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
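# Hedged usage sketch: `add_prefix_space=True` must be set at construction
# time for pretokenized input, matching the assertion in _batch_encode_plus:
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)  # would assert otherwise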
300
1
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
300
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCAmelCase : int = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' _lowerCAmelCase : Tuple = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' _lowerCAmelCase : int = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case ) }
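# Hedged sketch: the metric class above is a thin wrapper over nltk, so the
# same score can be computed directly (assumes nltk is installed):
from nltk.translate.gleu_score import corpus_gleu

references = [[["the", "cat", "sat"]]]  # one list of references per hypothesis
hypotheses = [["the", "cat", "sat"]]
assert corpus_gleu(references, hypotheses) == 1.0  # exact match -> precision = recall = 1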
300
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any]=False ) -> Optional[Any]: A_ : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A_ : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=False ) -> Any: for i in range(config.num_hidden_layers ): if base_model: A_ : Optional[Any] = "" else: A_ : Optional[int] = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) A_ : List[str] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A_ : int = in_proj_weight[ : config.hidden_size, : ] A_ : Union[str, Any] = in_proj_bias[: config.hidden_size] A_ : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ : Dict = in_proj_weight[ -config.hidden_size :, : ] A_ : int = in_proj_bias[-config.hidden_size :] def __snake_case ( _lowerCAmelCase : str ) -> int: A_ : str = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple ) -> str: A_ : Dict = dct.pop(_lowerCAmelCase ) A_ : Dict = val def __snake_case ( ) -> Dict: A_ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ : Tuple = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> Optional[int]: A_ : List[str] = ViTConfig() A_ : int = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": A_ : Tuple = True A_ : int = int(vit_name[-12:-10] ) A_ : Union[str, Any] = int(vit_name[-9:-6] ) else: A_ : List[Any] = 1000 A_ : Tuple = "huggingface/label-files" A_ : Dict = "imagenet-1k-id2label.json" A_ : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) A_ : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} A_ : List[Any] = idalabel A_ : Optional[Any] = {v: k for k, v in idalabel.items()} A_ : List[str] = int(vit_name[-6:-4] ) A_ : List[str] = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("tiny" ): A_ : Optional[Any] = 192 A_ : Dict = 768 A_ : int = 12 A_ : List[Any] = 3 elif vit_name[9:].startswith("small" ): A_ : Union[str, Any] = 384 A_ : List[Any] = 1536 A_ : List[str] = 12 A_ : List[Any] = 6 else: pass else: if vit_name[4:].startswith("small" ): A_ : List[str] = 768 A_ : Union[str, Any] = 2304 A_ : Tuple = 8 A_ : str = 8 elif vit_name[4:].startswith("base" ): pass elif vit_name[4:].startswith("large" ): A_ : List[str] = 1024 A_ : Tuple = 4096 A_ : List[Any] = 24 A_ : Optional[Any] = 16 elif vit_name[4:].startswith("huge" ): A_ : List[str] = 1280 A_ : List[Any] = 5120 A_ : str = 32 A_ : Any = 16 # load original model from timm A_ : Optional[int] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys A_ : Union[str, Any] = timm_model.state_dict() if base_model: remove_classification_head_(_lowerCAmelCase ) A_ : List[str] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": A_ : Optional[int] = ViTModel(_lowerCAmelCase ).eval() else: A_ : Optional[Any] = ViTForImageClassification(_lowerCAmelCase ).eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image, prepared by 
ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: A_ : str = DeiTImageProcessor(size=config.image_size ) else: A_ : Any = ViTImageProcessor(size=config.image_size ) A_ : str = image_processor(images=prepare_img() , return_tensors="pt" ) A_ : Dict = encoding["pixel_values"] A_ : List[str] = model(_lowerCAmelCase ) if base_model: A_ : Dict = timm_model.forward_features(_lowerCAmelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1e-3 ) else: A_ : Optional[Any] = timm_model(_lowerCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_patch16_224''', type=str, help='''Name of the ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowerCAmelCase : List[Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
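# Hedged invocation sketch (script name assumed from the transformers layout;
# the output path is a placeholder):
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224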
300
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]: A_ : Tuple = tmp_path / "cache" A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str: A_ : List[Any] = tmp_path / "cache" A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : int = features.copy() if features else default_expected_features A_ : str = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]: A_ : Dict = tmp_path / "cache" A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]: if issubclass(_lowerCAmelCase , _lowerCAmelCase ): A_ : int = parquet_path elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): A_ : Optional[int] = [parquet_path] A_ : Optional[int] = tmp_path / "cache" A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) 
for split in splits: A_ : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]: A_ : Optional[Any] = tmp_path / "cache" A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ : Union[str, Any] = ParquetDatasetReader( {"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple: A_ : Optional[Any] = tmp_path / "cache" A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : List[str] = features.copy() if features else default_expected_features A_ : Tuple = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]: if split: A_ : Any = {split: parquet_path} else: A_ : Optional[Any] = "train" A_ : str = {"train": parquet_path, "test": parquet_path} A_ : Any = tmp_path / "cache" A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict: A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) A_ : Dict = pf.read() assert dataset.data.table == output_table def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]: A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" ) A_ : int = {"image": [image_path]} A_ : Optional[Any] = Features({"image": Image()} ) A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase ) A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ 
(Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any: assert get_writer_batch_size(_lowerCAmelCase ) == expected
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class __magic_name__ : """simple docstring""" __UpperCamelCase = LEDConfig __UpperCamelCase = {} __UpperCamelCase = '''gelu''' def __init__( self :Optional[int] , snake_case :List[str] , snake_case :Any=13 , snake_case :Optional[int]=7 , snake_case :Union[str, Any]=True , snake_case :List[str]=False , snake_case :str=99 , snake_case :Dict=32 , snake_case :Dict=2 , snake_case :Dict=4 , snake_case :str=37 , snake_case :Optional[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :List[Any]=20 , snake_case :List[str]=2 , snake_case :List[Any]=1 , snake_case :Any=0 , snake_case :Dict=4 , ): '''simple docstring''' A_ : List[Any] = parent A_ : str = batch_size A_ : Optional[Any] = seq_length A_ : List[str] = is_training A_ : Optional[Any] = use_labels A_ : Any = vocab_size A_ : Optional[Any] = hidden_size A_ : Any = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : Optional[int] = intermediate_size A_ : Any = hidden_dropout_prob A_ : Optional[Any] = attention_probs_dropout_prob A_ : Tuple = max_position_embeddings A_ : int = eos_token_id A_ : Optional[int] = pad_token_id A_ : Dict = bos_token_id A_ : int = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after A_ : Optional[int] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests A_ : int = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A_ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A_ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) A_ : Union[str, Any] = prepare_led_inputs_dict(snake_case , snake_case , snake_case ) A_ : int = tf.concat( [tf.zeros_like(snake_case )[:, :-1], tf.ones_like(snake_case )[:, -1:]] , axis=-1 , ) A_ : Dict = global_attention_mask return config, inputs_dict def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Dict , snake_case :Optional[int] ): '''simple docstring''' A_ : List[Any] = TFLEDModel(config=snake_case ).get_decoder() A_ : Dict = inputs_dict["input_ids"] A_ : Union[str, Any] = input_ids[:1, :] A_ : Any = inputs_dict["attention_mask"][:1, :] A_ : Union[str, Any] = 1 # first forward pass A_ : int = model(snake_case , attention_mask=snake_case , use_cache=snake_case ) A_ , A_ : Any = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A_ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) A_ : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A_ : Any = model(snake_case , attention_mask=snake_case )[0] A_ : Tuple = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A_ : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A_ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx] A_ : int = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case , snake_case , rtol=1e-3 ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[Any]=None , ) -> Optional[Any]: if attention_mask is None: A_ : Optional[Any] = tf.cast(tf.math.not_equal(_lowerCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A_ : Tuple = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if 
head_mask is None: A_ : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A_ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () __UpperCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () __UpperCamelCase = ( { '''conversational''': TFLEDForConditionalGeneration, '''feature-extraction''': TFLEDModel, '''summarization''': TFLEDForConditionalGeneration, '''text2text-generation''': TFLEDForConditionalGeneration, '''translation''': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : int = TFLEDModelTester(self ) A_ : int = ConfigTester(self , config_class=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = tf.zeros_like(inputs_dict["attention_mask"] ) A_ : int = 2 A_ : List[Any] = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , ) A_ : str = True A_ : Any = self.model_tester.seq_length A_ : int = self.model_tester.encoder_seq_length def check_decoder_attentions_output(snake_case :str ): A_ : Dict = outputs.decoder_attentions self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(snake_case :int ): A_ : Optional[Any] = [t.numpy() for t in outputs.encoder_attentions] A_ : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: A_ : Union[str, Any] = True A_ : Any = False A_ : int = False A_ : str = model_class(snake_case ) A_ : Tuple = model(self._prepare_for_class(snake_case , snake_case ) ) A_ : Dict = len(snake_case ) self.assertEqual(config.output_hidden_states , snake_case ) check_encoder_attentions_output(snake_case ) if self.is_encoder_decoder: A_ : Optional[Any] = model_class(snake_case ) A_ : Tuple = model(self._prepare_for_class(snake_case , snake_case ) ) self.assertEqual(config.output_hidden_states , 
snake_case ) check_decoder_attentions_output(snake_case ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ : Optional[Any] = True A_ : List[str] = model_class(snake_case ) A_ : Tuple = model(self._prepare_for_class(snake_case , snake_case ) ) self.assertEqual(config.output_hidden_states , snake_case ) check_encoder_attentions_output(snake_case ) # Check attention is always last and order is fine A_ : Optional[int] = True A_ : Optional[int] = True A_ : Dict = model_class(snake_case ) A_ : Optional[Any] = model(self._prepare_for_class(snake_case , snake_case ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case ) ) self.assertEqual(model.config.output_hidden_states , snake_case ) check_encoder_attentions_output(snake_case ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def __snake_case ( _lowerCAmelCase : Union[str, Any] ) -> Dict: return tf.constant(_lowerCAmelCase , dtype=tf.intaa ) _lowerCAmelCase : Union[str, Any] = 1e-4 @slow @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : int = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here A_ : Any = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) A_ : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) A_ : Optional[Any] = prepare_led_inputs_dict(model.config , snake_case , snake_case ) A_ : Tuple = model(**snake_case )[0] A_ : Optional[int] = (1, 1_024, 768) self.assertEqual(output.shape , snake_case ) # change to expected output here A_ : Any = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1e-3 ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here A_ : Tuple = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) A_ : List[str] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) A_ : Optional[Any] = prepare_led_inputs_dict(model.config , snake_case , snake_case ) A_ : int = model(**snake_case )[0] A_ : Optional[int] = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , snake_case ) # change to expected output here A_ : Union[str, Any] = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1e-3 , rtol=1e-3 )
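# A short sketch of the global-attention bookkeeping used in the LED tester
# above: every position attends locally, and the last token of each sequence
# is additionally marked as global. Shapes here are illustrative.
import tensorflow as tf

attention_mask = tf.ones((2, 8), dtype=tf.int32)
global_attention_mask = tf.concat(
    [tf.zeros_like(attention_mask)[:, :-1], tf.ones_like(attention_mask)[:, -1:]],
    axis=-1,
)
print(global_attention_mask.numpy())  # zeros everywhere except the final column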
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int: with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f: A_ : Optional[int] = json.load(_lowerCAmelCase ) A_ : Union[str, Any] = {} A_ : Tuple = [] A_ : Optional[Any] = [] for key, info in class_info.items(): A_ : Tuple = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(_lowerCAmelCase ) ) A_ : Optional[Any] = thing_ids A_ : int = class_names return metadata class __magic_name__ ( unittest.TestCase ): """simple docstring""" def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ): '''simple docstring''' A_ : Tuple = parent A_ : List[str] = batch_size A_ : Optional[int] = num_channels A_ : Tuple = min_resolution A_ : List[Any] = max_resolution A_ : Union[str, Any] = do_resize A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size A_ : Tuple = do_normalize A_ : List[str] = image_mean A_ : List[Any] = image_std A_ : Union[str, Any] = class_info_file A_ : List[Any] = prepare_metadata(snake_case , snake_case ) A_ : Tuple = num_text A_ : str = repo_path # for the post_process_functions A_ : Any = 2 A_ : int = 10 A_ : Optional[int] = 10 A_ : Tuple = 3 A_ : Tuple = 4 A_ : str = num_labels A_ : int = do_reduce_labels A_ : List[Any] = ignore_index def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ): '''simple docstring''' if not batched: A_ : List[str] = image_inputs[0] if isinstance(snake_case , Image.Image ): A_ , A_ : Dict = image.size else: A_ , A_ : Tuple = image.shape[1], image.shape[2] if w < h: A_ : str = int(self.size["shortest_edge"] * h / w ) A_ : Any = self.size["shortest_edge"] elif w > h: A_ : Optional[int] = self.size["shortest_edge"] A_ : List[str] = int(self.size["shortest_edge"] * w / h ) else: A_ : List[str] = self.size["shortest_edge"] A_ : Optional[Any] = 
self.size["shortest_edge"] else: A_ : Tuple = [] for image in image_inputs: A_ , A_ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : Tuple = max(snake_case , key=lambda snake_case : item[0] )[0] A_ : Union[str, Any] = max(snake_case , key=lambda snake_case : item[1] )[1] return expected_height, expected_width def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __UpperCamelCase = image_processing_class def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Union[str, Any] = OneFormerImageProcessorTester(self ) @property def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , "image_mean" ) ) self.assertTrue(hasattr(snake_case , "image_std" ) ) self.assertTrue(hasattr(snake_case , "do_normalize" ) ) self.assertTrue(hasattr(snake_case , "do_resize" ) ) self.assertTrue(hasattr(snake_case , "size" ) ) self.assertTrue(hasattr(snake_case , "ignore_index" ) ) self.assertTrue(hasattr(snake_case , "class_info_file" ) ) self.assertTrue(hasattr(snake_case , "num_text" ) ) self.assertTrue(hasattr(snake_case , "repo_path" ) ) self.assertTrue(hasattr(snake_case , "metadata" ) ) self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , 
np.ndarray ) # Test not batched input A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Optional[Any] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Any = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ): '''simple docstring''' A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # prepare image and target A_ : Tuple = self.image_processing_tester.num_labels A_ : str = None A_ : Tuple = None A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) if with_segmentation_maps: A_ : List[str] = num_labels if is_instance_map: A_ : List[str] = list(range(snake_case ) ) * 2 A_ : int = dict(enumerate(snake_case ) ) A_ : List[str] = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": A_ : int = [Image.fromarray(snake_case ) for annotation in annotations] A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , ) return inputs def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' def common(snake_case :Dict=False , snake_case :Optional[int]=None ): A_ : Tuple = self.comm_get_image_processor_inputs( with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case ) A_ : Optional[Any] = inputs["mask_labels"] A_ : List[Any] = inputs["class_labels"] A_ : Optional[Any] = inputs["pixel_values"] A_ : int = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ): self.assertEqual(mask_label.shape[0] , 
class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text ) common() common(is_instance_map=snake_case ) common(is_instance_map=snake_case , segmentation_type="pil" ) common(is_instance_map=snake_case , segmentation_type="pil" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = np.zeros((20, 50) ) A_ : List[str] = 1 A_ : int = 1 A_ : Optional[Any] = 1 A_ : Any = binary_mask_to_rle(snake_case ) self.assertEqual(len(snake_case ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs() A_ : int = fature_extractor.post_process_semantic_segmentation(snake_case ) self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] A_ : List[Any] = fature_extractor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : str = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Tuple = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
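# A common run-length-encoding recipe in the spirit of the
# `binary_mask_to_rle` helper tested above; the library's own implementation
# may differ in details such as flattening order, so treat this as a sketch.
import numpy as np

def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    pixels = np.concatenate([[0], mask.flatten(), [0]])  # pad so every run is bounded
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1    # 1-based change points
    runs[1::2] -= runs[::2]                              # (start, end) -> (start, length)
    return list(runs)

m = np.zeros((20, 50), dtype=np.uint8)
m[0, 21:45] = 1
print(binary_mask_to_rle_sketch(m))  # [22, 24]: run starts at position 22, length 24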
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : Dict = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ) -> List[Any]: A_ : List[str] = original_name.split("." )[0] A_ : str = key.split("." ) A_ : Union[str, Any] = int(key_list[key_list.index(_lowerCAmelCase ) - 2] ) A_ : Any = int(key_list[key_list.index(_lowerCAmelCase ) - 1] ) A_ : Tuple = orig_block_num - offset A_ : Any = key.replace(f"{orig_block_num}.{layer_num}.{original_name}" , f"block.{new_block_num}.{layer_num}.{new_name}" ) return key def __snake_case ( _lowerCAmelCase : Optional[Any] ) -> Optional[int]: A_ : Any = OrderedDict() A_ , A_ : str = 0, 0 for key, value in state_dict.items(): if key.startswith("network" ): A_ : List[Any] = key.replace("network" , "poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 A_ : Tuple = key[: key.find("proj" )] A_ : Optional[Any] = key.replace(_lowerCAmelCase , f"patch_embeddings.{total_embed_found}." ) A_ : List[str] = key.replace("proj" , "projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: A_ : Optional[Any] = "poolformer.encoder." + key if "mlp.fc1" in key: A_ : Optional[int] = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "mlp.fc1" , "output.conv1" ) if "mlp.fc2" in key: A_ : Tuple = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "mlp.fc2" , "output.conv2" ) if "norm1" in key: A_ : Optional[int] = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "norm1" , "before_norm" ) if "norm2" in key: A_ : Union[str, Any] = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "norm2" , "after_norm" ) if "layer_scale_1" in key: A_ : Optional[int] = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "layer_scale_1" , "layer_scale_1" ) if "layer_scale_2" in key: A_ : Any = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "layer_scale_2" , "layer_scale_2" ) if "head" in key: A_ : Optional[int] = key.replace("head" , "classifier" ) A_ : Any = value return new_state_dict def __snake_case ( ) -> str: A_ : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return image @torch.no_grad() def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]: A_ : Tuple = PoolFormerConfig() # set attributes based on model_name A_ : int = "huggingface/label-files" A_ : List[Any] = model_name[-3:] A_ : Tuple = 1000 A_ : Optional[int] = "imagenet-1k-id2label.json" A_ : int = (1, 1000) # set config attributes A_ : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) A_ : Tuple = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} A_ : Optional[Any] = idalabel A_ : Dict = {v: k for k, v in idalabel.items()} if size == "s12": A_ : Optional[Any] = [2, 2, 6, 2] A_ : Dict = [64, 128, 320, 512] A_ : Tuple = 4.0 A_ : str = 0.9 elif size == 
"s24": A_ : Any = [4, 4, 12, 4] A_ : List[Any] = [64, 128, 320, 512] A_ : Optional[Any] = 4.0 A_ : Any = 0.9 elif size == "s36": A_ : Any = [6, 6, 18, 6] A_ : Any = [64, 128, 320, 512] A_ : str = 4.0 A_ : List[str] = 1e-6 A_ : List[str] = 0.9 elif size == "m36": A_ : List[str] = [6, 6, 18, 6] A_ : str = [96, 192, 384, 768] A_ : Optional[int] = 4.0 A_ : str = 1e-6 A_ : Optional[Any] = 0.95 elif size == "m48": A_ : Any = [8, 8, 24, 8] A_ : Any = [96, 192, 384, 768] A_ : Optional[Any] = 4.0 A_ : int = 1e-6 A_ : List[Any] = 0.95 else: raise ValueError(f"Size {size} not supported" ) # load image processor A_ : str = PoolFormerImageProcessor(crop_pct=_lowerCAmelCase ) # Prepare image A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).pixel_values logger.info(f"Converting model {model_name}..." ) # load original state dict A_ : List[Any] = torch.load(_lowerCAmelCase , map_location=torch.device("cpu" ) ) # rename keys A_ : Dict = rename_keys(_lowerCAmelCase ) # create HuggingFace model and load state dict A_ : Optional[int] = PoolFormerForImageClassification(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) model.eval() # Define image processor A_ : Dict = PoolFormerImageProcessor(crop_pct=_lowerCAmelCase ) A_ : Dict = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values # forward pass A_ : Optional[Any] = model(_lowerCAmelCase ) A_ : str = outputs.logits # define expected logit slices for different models if size == "s12": A_ : Union[str, Any] = torch.tensor([-0.30_45, -0.67_58, -0.48_69] ) elif size == "s24": A_ : Optional[int] = torch.tensor([0.44_02, -0.13_74, -0.80_45] ) elif size == "s36": A_ : Optional[int] = torch.tensor([-0.60_80, -0.51_33, -0.58_98] ) elif size == "m36": A_ : Union[str, Any] = torch.tensor([0.39_52, 0.22_63, -1.26_68] ) elif size == "m48": A_ : int = torch.tensor([0.11_67, -0.06_56, -0.34_23] ) else: raise ValueError(f"Size {size} not supported" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-2 ) # finally, save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
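# Quick usage sketch (assuming a `transformers` release that ships
# Data2VecVision support): instantiate the config and inspect a few fields.
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(image_size=224, patch_size=16)
assert config.model_type == "data2vec-vision"
print(config.hidden_size, config.num_hidden_layers)  # 768 12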
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
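# The deprecation shim above follows a reusable pattern: keep the old class
# name importable, warn once at construction, and delegate everything to the
# replacement. A self-contained sketch with hypothetical names:
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

proc = OldProcessor(size=256)  # emits a FutureWarning, then behaves like NewProcessor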
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = ['''input_features''', '''attention_mask'''] def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ): '''simple docstring''' super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case ) A_ : Union[str, Any] = feature_size A_ : int = sampling_rate A_ : str = padding_value A_ : int = hop_length A_ : List[str] = win_length A_ : Any = frame_signal_scale A_ : str = preemphasis_coeff A_ : List[str] = mel_floor A_ : str = normalize_means A_ : Any = normalize_vars A_ : Optional[Any] = win_function A_ : Dict = return_attention_mask A_ : List[str] = win_length * sampling_rate // 1_000 A_ : List[str] = hop_length * sampling_rate // 1_000 A_ : List[str] = optimal_fft_length(self.sample_size ) A_ : str = (self.n_fft // 2) + 1 def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ): '''simple docstring''' if self.win_function == "hamming_window": A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case ) else: A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function ) A_ : Optional[int] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) A_ : Tuple = spectrogram( one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , ) return msfc_features.T def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ): '''simple docstring''' if self.normalize_means: A_ : int = x[:input_length].mean(axis=0 ) A_ : Any = np.subtract(snake_case , snake_case ) if self.normalize_vars: A_ : List[Any] = x[:input_length].std(axis=0 ) A_ : Optional[int] = np.divide(snake_case , snake_case ) if input_length < x.shape[0]: A_ : Optional[int] = padding_value # make sure array is in float32 A_ : Union[str, Any] = x.astype(np.floataa ) return x def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ): '''simple docstring''' A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )] def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case 
:Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) A_ : Optional[Any] = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): A_ : int = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Tuple = [raw_speech] # extract fbank features A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech] # convert into correct format for padding A_ : Union[str, Any] = BatchFeature({"input_features": features} ) A_ : str = self.pad( snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , ) # make sure list is in array format A_ : Optional[int] = padded_inputs.get("input_features" ) if isinstance(input_features[0] , snake_case ): A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features] A_ : Dict = padded_inputs.get("attention_mask" ) if attention_mask is not None: A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: A_ : Dict = ( np.array(snake_case , dtype=np.intaa ) if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) A_ : Optional[int] = self.normalize( padded_inputs["input_features"] , attention_mask=snake_case ) if return_tensors is not None: A_ : Dict = padded_inputs.convert_to_tensors(snake_case ) return padded_inputs
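# A sketch of the frame bookkeeping the feature extractor performs: converting
# the 25 ms window and 10 ms hop to sample counts at 16 kHz and counting the
# resulting frames for one second of audio. Pure arithmetic, no library calls.
sampling_rate = 16_000
sample_size = 25 * sampling_rate // 1_000    # 400 samples per analysis window
sample_stride = 10 * sampling_rate // 1_000  # 160 samples between window starts

num_samples = 16_000  # one second of mono audio
num_frames = 1 + (num_samples - sample_size) // sample_stride
print(sample_size, sample_stride, num_frames)  # 400 160 98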
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : int = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]: for attribute in key.split("." ): A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: A_ : Tuple = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Any = value elif weight_type == "bias": A_ : str = value else: A_ : Any = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]: A_ : Optional[Any] = [] A_ : Any = fairseq_model.state_dict() A_ : Union[str, Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ : str = None for name, value in fairseq_dict.items(): A_ : Tuple = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) A_ : Optional[Any] = True elif name.split("." )[0] == "proj": A_ : Dict = fairseq_model.proj A_ : List[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ : int = True if "*" in mapped_key: A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." 
)[-2] A_ : int = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: A_ : List[Any] = "weight_g" elif "weight_v" in name: A_ : List[Any] = "weight_v" elif "bias" in name: A_ : Dict = "bias" elif "weight" in name: A_ : List[Any] = "weight" else: A_ : Dict = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: A_ : Any = full_name.split("conv_layers." )[-1] A_ : Optional[int] = name.split("." ) A_ : Optional[Any] = int(items[0] ) A_ : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A_ : List[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A_ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) A_ : List[Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) A_ : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str: A_ , A_ : List[str] = emb.weight.shape A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) A_ : List[Any] = emb.weight.data return lin_layer def __snake_case ( _lowerCAmelCase : str ) -> Tuple: with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: A_ : int = f.readlines() A_ : Dict = [line.split(" " )[0] for line in lines] A_ : Tuple = len(_lowerCAmelCase ) A_ : Union[str, Any] = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple: A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) A_ : str = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) A_ : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ : Union[str, Any] = model[0].eval() # set weights for wav2vec2 encoder A_ : Tuple = WavaVecaModel(_lowerCAmelCase ) A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase ) A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) A_ : Optional[Any] = False # add projection layer A_ : Optional[Any] = nn.Parameter(projection_layer.weight ) A_ : int = nn.Parameter(projection_layer.bias ) A_ : str = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) A_ : Optional[int] = hf_wavavec.config.to_dict() A_ : int = tokenizer.pad_token_id A_ : List[str] = tokenizer.bos_token_id A_ : List[str] = tokenizer.eos_token_id A_ : List[str] = "speech_to_text_2" A_ : Tuple = "wav2vec2" A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, 
help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
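# One helper in the script above turns a decoder embedding matrix into an
# output linear layer. A minimal PyTorch sketch of the same idea, with
# illustrative sizes:
import torch
from torch import nn

emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
vocab_size, hidden = emb.weight.shape
lm_head = nn.Linear(hidden, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # copy (use `lm_head.weight = emb.weight` to tie)

tokens = torch.tensor([3, 7])
logits = lm_head(emb(tokens))  # (2, 100) scores over the vocabulary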
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' super().__init__() A_ : Tuple = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal." ) A_ : List[Any] = prefix_inner_dim A_ : Union[str, Any] = prefix_hidden_dim A_ : List[str] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = ( nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = GPTaConfig( vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , ) A_ : Optional[Any] = GPTaLMHeadModel(snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ): '''simple docstring''' A_ : Any = self.transformer.transformer.wte(snake_case ) A_ : str = self.encode_prefix(snake_case ) A_ : Union[str, Any] = self.decode_prefix(snake_case ) A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A_ : int = torch.cat((dummy_token, input_ids) , dim=1 ) A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ): '''simple docstring''' return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ): '''simple docstring''' return self.encode_prefix(snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Any = torch.split(snake_case , 1 , dim=0 ) A_ : Optional[int] = [] A_ : Union[str, Any] = [] for feature in features: A_ : Tuple = 
self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature # Only support beam search for now A_ , A_ : Dict = self.generate_beam( input_embeds=snake_case , device=snake_case , eos_token_id=snake_case ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A_ : int = torch.stack(snake_case ) A_ : int = torch.stack(snake_case ) return generated_tokens, generated_seq_lengths @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ): '''simple docstring''' A_ : Optional[Any] = eos_token_id A_ : List[Any] = None A_ : List[Any] = None A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int ) A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool ) if input_embeds is not None: A_ : Any = input_embeds else: A_ : Optional[Any] = self.transformer.transformer.wte(snake_case ) for i in range(snake_case ): A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case ) A_ : str = outputs.logits A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A_ : List[str] = logits.softmax(-1 ).log() if scores is None: A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 ) A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] ) A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A_ : Union[str, Any] = next_tokens else: A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] ) A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 ) else: A_ : List[str] = -float(np.inf ) A_ : List[Any] = 0 A_ : Union[str, Any] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 A_ : Optional[Any] = scores_sum / seq_lengths[:, None] A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 ) A_ : str = next_tokens // scores_sum.shape[1] A_ : Union[str, Any] = seq_lengths[next_tokens_source] A_ : Optional[int] = next_tokens % scores_sum.shape[1] A_ : Tuple = next_tokens.unsqueeze(1 ) A_ : Tuple = tokens[next_tokens_source] A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 ) A_ : Dict = generated[next_tokens_source] A_ : Union[str, Any] = scores_sum_average * seq_lengths A_ : Optional[int] = is_stopped[next_tokens_source] A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 ) A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze() if is_stopped.all(): break A_ : int = scores / seq_lengths A_ : str = scores.argsort(descending=snake_case ) # tokens tensors are already padded to max_seq_length A_ : Dict = [tokens[i] for i in order] A_ : int = torch.stack(snake_case , dim=0 ) A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Naive evaluation: computes x**i separately for every coefficient."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Horner's rule: one multiply and one add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 7.0*x^4 + 9.3*x^3 + 5.0*x^2
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
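A quick numerical sanity check, added here as a sketch: the naive evaluator and Horner's rule above should agree on any input up to floating-point rounding.

import random

# Compare evaluate_poly and horner (defined above) on random inputs.
poly = (0.0, 0.0, 5.0, 9.3, 7.0)
for _ in range(100):
    x = random.uniform(-10.0, 10.0)
    expected = horner(poly, x)
    assert abs(evaluate_poly(poly, x) - expected) < 1e-6 * max(1.0, abs(expected))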
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
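A migration sketch (an addition, not part of the file): the replacement class the deprecation warning points to, loaded from a public checkpoint.

from transformers import YolosImageProcessor

# "hustvl/yolos-tiny" is a public YOLOS checkpoint on the Hub.
processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")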
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
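A usage sketch (an addition): this builder is what `load_dataset` dispatches to for pickled pandas DataFrames passed via `data_files`. The file path below is illustrative.

import os

import pandas as pd
from datasets import load_dataset

os.makedirs("data", exist_ok=True)
pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("data/train.pkl")
ds = load_dataset("pandas", data_files={"train": "data/train.pkl"})
print(ds["train"][0])  # {'text': 'a', 'label': 0}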
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True iff the side lengths can form a polygon in the Euclidean plane:
    the longest side must be strictly shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
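A usage sketch (an addition) for the check above, which generalizes the triangle inequality to n sides.

assert check_polygon([6, 10, 5])  # a valid triangle: 10 < 6 + 5
assert not check_polygon([3, 7, 13, 2])  # 13 > 3 + 7 + 2, so no polygon exists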
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into one sentence per line (needed for sentence-level ROUGE)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char; re.sub returns a new string
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
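A usage sketch (an addition): sentence-per-line output as expected by sentence-level ROUGE scoring in the summarization examples.

text = "Pegasus uses <n> as a newline token. ROUGE-L scoring is sentence-aware."
print(add_newline_to_end_of_each_sentence(text))  # prints the two sentences on separate lines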
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ): '''simple docstring''' super().__init__() self.register_modules( vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A_ : int = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' self.enable_attention_slicing(snake_case ) @torch.no_grad() def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ): '''simple docstring''' if isinstance(snake_case , snake_case ): A_ : Dict = 1 elif isinstance(snake_case , snake_case ): A_ : Optional[Any] = len(snake_case ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(snake_case )}." 
) # get prompt text embeddings A_ : int = self.tokenizer( snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A_ : Dict = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A_ , A_ , A_ : int = text_embeddings.shape A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 ) A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A_ : Dict = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A_ : List[str] if negative_prompt is None: A_ : List[str] = [""] elif type(snake_case ) is not type(snake_case ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !=" f" {type(snake_case )}." ) elif isinstance(snake_case , snake_case ): A_ : Optional[Any] = [negative_prompt] elif batch_size != len(snake_case ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: A_ : Any = negative_prompt A_ : Optional[int] = text_input_ids.shape[-1] A_ : Dict = self.tokenizer( snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , ) A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A_ : Tuple = uncond_embeddings.shape[1] A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 ) A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) A_ : List[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A_ : Tuple = torch.randn( snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device ) A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to( self.device ) else: A_ : int = torch.randn( snake_case , generator=snake_case , device=self.device , dtype=snake_case ) A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case ) else: if latents_reference.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A_ : Tuple = latents_reference.to(self.device ) A_ : Any = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2 A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2 A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy A_ : Optional[Any] = 0 if dx < 0 else dx A_ : Optional[Any] = 0 if dy < 0 else dy A_ : List[str] = max(-dx , 0 ) A_ : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A_ : str = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ : List[str] = {} if accepts_eta: A_ : Union[str, Any] = eta for i, t in enumerate(self.progress_bar(snake_case ) ): # expand the latents if we are doing classifier free guidance A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case ) # predict the noise residual A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample # perform guidance if do_classifier_free_guidance: A_ , A_ : Dict = noise_pred.chunk(2 ) A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case , snake_case , snake_case ) A_ : List[str] = 1 / 0.18215 * latents A_ : Tuple = self.vae.decode(snake_case ).sample A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to( self.device ) A_ , A_ : List[str] = self.safety_checker( images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: A_ : List[str] = None if output_type == "pil": A_ : Optional[int] = self.numpy_to_pil(snake_case ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
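A usage sketch (an addition, with assumptions): this class matches the "seed_resize_stable_diffusion" community pipeline shipped with diffusers; the model id and pipeline name below are assumptions to adapt to your setup.

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="seed_resize_stable_diffusion",  # assumed community-pipeline name
    torch_dtype=torch.float16,
).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
# Same seed at a different aspect ratio should yield a visually similar image.
image = pipe("an astronaut riding a horse", height=512, width=768, generator=generator).images[0]
image.save("astronaut_768x512.png")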
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if the sink t is reachable from s in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
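A quick check (an addition): by the max-flow/min-cut theorem, the original capacities of the returned cut edges must sum to the value of the maximum flow.

# Pass copies so the residual-graph mutation inside mincut does not
# clobber the capacity lookup afterwards.
capacities = [row[:] for row in test_graph]
cut_edges = mincut([row[:] for row in test_graph], source=0, sink=5)
print(sum(capacities[u][v] for u, v in cut_edges))  # min-cut capacity == max flow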
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict: A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase ) A_ : List[str] = nn.functional.normalize(_lowerCAmelCase ) return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() ) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = CLIPConfig __UpperCamelCase = ['''CLIPEncoderLayer'''] def __init__( self :int , snake_case :CLIPConfig ): '''simple docstring''' super().__init__(snake_case ) A_ : int = CLIPVisionModel(config.vision_config ) A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case ) A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case ) A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case ) A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case ) A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ): '''simple docstring''' A_ : List[Any] = self.vision_model(snake_case )[1] # pooled_output A_ : List[Any] = self.visual_projection(snake_case ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy() A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy() A_ : Union[str, Any] = [] A_ : Any = image_embeds.shape[0] for i in range(snake_case ): A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A_ : Optional[Any] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A_ : Optional[Any] = special_cos_dist[i][concept_idx] A_ : Tuple = self.special_care_embeds_weights[concept_idx].item() A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} ) A_ : Any = 0.01 for concept_idx in range(len(cos_dist[0] ) ): A_ : Tuple = cos_dist[i][concept_idx] A_ : Tuple = self.concept_embeds_weights[concept_idx].item() A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(snake_case ) result.append(snake_case ) A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ): '''simple docstring''' A_ : List[str] = self.vision_model(snake_case )[1] # pooled_output A_ : int = self.visual_projection(snake_case ) A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds ) A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A_ : Optional[Any] = 0.0 A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment # 
special_scores = special_scores.round(decimals=3) A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 ) A_ : Optional[Any] = special_care * 0.01 A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
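An isolated sketch (an addition) of the scoring primitive used throughout the safety checker above: cosine similarity between L2-normalized image embeddings and a bank of concept embeddings.

import torch
import torch.nn as nn

image_embeds = torch.randn(2, 768)  # a batch of projected image embeddings
concept_embeds = torch.randn(17, 768)  # one row per filtered concept
scores = nn.functional.normalize(image_embeds) @ nn.functional.normalize(concept_embeds).t()
print(scores.shape)  # torch.Size([2, 17]); per-concept thresholds are then applied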
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
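An illustration sketch (an addition): with the lazy module above, importing the package itself is cheap; the torch-backed classes are only imported on first attribute access. Assumes a transformers install that ships AltCLIP.

import importlib

altclip = importlib.import_module("transformers.models.altclip")
config_cls = altclip.AltCLIPConfig  # this attribute access triggers the real submodule import
print(config_cls.model_type)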
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]: A_ : Tuple = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("encoder.deit.cls_token", "encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict: for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" ) A_ : List[Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : Optional[Any] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Optional[Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any: A_ : Dict = dct.pop(_lowerCAmelCase ) A_ : List[Any] = val def __snake_case ( _lowerCAmelCase : List[str] ) -> int: if "handwritten" in checkpoint_url: A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in 
checkpoint_url: A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" ) return im @torch.no_grad() def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]: A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase ) A_ : Tuple = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : Tuple = 768 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Optional[Any] = 1024 A_ : Union[str, Any] = 4096 A_ : Union[str, Any] = 24 A_ : List[Any] = 16 A_ : List[str] = 1024 else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Dict = False A_ : int = "relu" A_ : Optional[int] = 1024 A_ : Any = True A_ : List[Any] = False A_ : Optional[int] = False # load HuggingFace model A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase ) A_ : str = TrOCRForCausalLM(_lowerCAmelCase ) A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) model.eval() # load state_dict of original model, rename some keys A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"] A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Dict = state_dict.pop(_lowerCAmelCase ) if key.startswith("decoder" ) and "output_projection" not in key: A_ : List[str] = val else: A_ : Optional[Any] = val # load state dict model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size ) A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" ) A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase ) A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values # verify logits A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) A_ : Tuple = outputs.logits A_ : Union[str, Any] = torch.Size([1, 1, 50265] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Union[str, Any] = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : str = torch.tensor( [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, 
-3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : List[str] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
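A post-conversion inference sketch (an addition; the folder and image paths are placeholders for the conversion output above).

from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("path/to/pytorch_dump_folder")
model = VisionEncoderDecoderModel.from_pretrained("path/to/pytorch_dump_folder")

# "line.png" stands in for a cropped text-line image.
pixel_values = processor(images=Image.open("line.png").convert("RGB"), return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])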
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = 1 @register_to_config def __init__( self :Union[str, Any] , snake_case :int = 2_000 , snake_case :float = 0.15 , snake_case :float = 0.01 , snake_case :float = 1348.0 , snake_case :float = 1e-5 , snake_case :int = 1 , ): '''simple docstring''' A_ : Dict = sigma_max # setable values A_ : List[Any] = None self.set_sigmas(snake_case , snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :Optional[int] = None ): '''simple docstring''' return sample def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :float = None , snake_case :Union[str, torch.device] = None ): '''simple docstring''' A_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps A_ : Tuple = torch.linspace(1 , snake_case , snake_case , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int , snake_case :float = None , snake_case :float = None , snake_case :float = None ): '''simple docstring''' A_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min A_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max A_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(snake_case , snake_case ) A_ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) A_ : Any = torch.exp(torch.linspace(math.log(snake_case ) , math.log(snake_case ) , snake_case ) ) A_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Dict ): '''simple docstring''' return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :int , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) A_ : int = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) A_ : Optional[Any] = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda A_ : Dict = timesteps.to(self.discrete_sigmas.device ) A_ : Optional[int] = self.discrete_sigmas[timesteps].to(sample.device ) A_ : int = self.get_adjacent_sigma(snake_case , snake_case ).to(sample.device ) A_ : Union[str, Any] = torch.zeros_like(snake_case ) A_ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE 
models to ancestral sampling methods A_ : Optional[int] = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): A_ : Tuple = diffusion.unsqueeze(-1 ) A_ : Optional[Any] = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of A_ : List[Any] = randn_tensor( sample.shape , layout=sample.layout , generator=snake_case , device=sample.device , dtype=sample.dtype ) A_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? A_ : Any = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=snake_case , prev_sample_mean=snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction A_ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case ).to(sample.device ) # compute step size from the model_output, the noise, and the snr A_ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() A_ : List[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() A_ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 A_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term A_ : int = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): A_ : str = step_size.unsqueeze(-1 ) A_ : Optional[Any] = sample + step_size * model_output A_ : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , ): '''simple docstring''' A_ : Union[str, Any] = timesteps.to(original_samples.device ) A_ : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps] A_ : List[Any] = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(snake_case ) * sigmas[:, None, None, None] ) A_ : Optional[int] = noise + original_samples return noisy_samples def __len__( self :Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
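A usage sketch (an addition): the scheduler above is exercised end-to-end by diffusers' ScoreSdeVePipeline. "google/ncsnpp-church-256" is a known score-SDE checkpoint, but treat both names as assumptions to verify against your diffusers version.

import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# Predictor-corrector sampling; score-SDE models typically need many steps.
image = pipe(num_inference_steps=2000, generator=torch.manual_seed(0)).images[0]
image.save("sde_ve_sample.png")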
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __magic_name__ : """simple docstring""" def __init__( self :str , snake_case :List[str] , snake_case :int=13 , snake_case :int=32 , snake_case :List[str]=3 , snake_case :Union[str, Any]=4 , snake_case :int=[10, 20, 30, 40] , snake_case :List[str]=[2, 2, 3, 2] , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :List[str]=37 , snake_case :Union[str, Any]="gelu" , snake_case :Tuple=10 , snake_case :Optional[int]=0.02 , snake_case :Optional[Any]=["stage2", "stage3", "stage4"] , snake_case :int=[2, 3, 4] , snake_case :List[Any]=None , ): '''simple docstring''' A_ : str = parent A_ : Optional[int] = batch_size A_ : Optional[Any] = image_size A_ : Union[str, Any] = num_channels A_ : Tuple = num_stages A_ : List[str] = hidden_sizes A_ : Any = depths A_ : Dict = is_training A_ : Optional[Any] = use_labels A_ : str = intermediate_size A_ : Union[str, Any] = hidden_act A_ : str = num_labels A_ : Union[str, Any] = initializer_range A_ : List[Any] = out_features A_ : Tuple = out_indices A_ : Any = scope def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : int = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Optional[int] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[int] , snake_case :int , snake_case :Any ): '''simple docstring''' A_ : Union[str, Any] = ConvNextModel(config=snake_case ) model.to(snake_case ) model.eval() A_ : List[Any] = model(snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :List[str] , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Tuple = ConvNextForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Tuple = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :int , snake_case :List[Any] , snake_case :Tuple ): '''simple docstring''' A_ : Any = 
ConvNextBackbone(config=snake_case ) model.to(snake_case ) model.eval() A_ : Union[str, Any] = model(snake_case ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Dict = None A_ : Optional[Any] = ConvNextBackbone(config=snake_case ) model.to(snake_case ) model.eval() A_ : List[Any] = model(snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = self.prepare_config_and_inputs() A_ , A_ , A_ : Tuple = config_and_inputs A_ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) __UpperCamelCase = ( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : str = ConvNextModelTester(self ) A_ : List[str] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return @unittest.skip(reason="ConvNext does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="ConvNext does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' pass @unittest.skip(reason="ConvNext does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : int = model_class(snake_case ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Union[str, Any] = [*signature.parameters.keys()] A_ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : str = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(snake_case :int , snake_case :str , snake_case :List[Any] ): A_ : List[Any] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): A_ : Dict = model(**self._prepare_for_class(snake_case , snake_case ) ) A_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[str] = self.model_tester.num_stages self.assertEqual(len(snake_case ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Union[str, Any] = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : Dict = True check_hidden_states_output(snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = ConvNextModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __snake_case ( ) -> Union[str, Any]: A_ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(snake_case ) A_ : List[Any] = self.default_image_processor A_ : Any = prepare_img() A_ : int = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case ) # forward pass with torch.no_grad(): A_ : Optional[Any] = model(**snake_case ) # verify the logits A_ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) A_ : Dict = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) ) @require_torch class __magic_name__ ( unittest.TestCase , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = (ConvNextBackbone,) if is_torch_available() else () __UpperCamelCase = ConvNextConfig __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Optional[int] = ConvNextModelTester(self )
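A standalone version (an addition) of the integration check above, runnable outside the test harness; the fixture path is the repo-local test image.

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])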
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of func (an expression in x) starting from the guess a."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This tolerance dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find a root of a trigonometric function (sin(x) = 0 near 2 gives pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find a root of a polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find a root of a logarithm
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
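A contrast sketch (an addition): the same Newton iteration on plain callables, avoiding string eval and symbolic differentiation entirely.

import math
from collections.abc import Callable


def newton_on_callables(
    f: Callable[[float], float], df: Callable[[float], float], x0: float, tol: float = 1e-10
) -> float:
    x = x0
    while abs(f(x)) >= tol:
        x -= f(x) / df(x)  # standard Newton update: x_{n+1} = x_n - f(x_n)/f'(x_n)
    return x


print(newton_on_callables(math.sin, math.cos, 2.0))  # converges to pi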
import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) _lowerCAmelCase : List[Any] = 50 # max width of layer names _lowerCAmelCase : Optional[int] = 70 # max width of quantizer names def __snake_case ( _lowerCAmelCase : List[str] ) -> Optional[Any]: A_ : str = parser.add_argument_group("quant_trainer arguments" ) group.add_argument("--wprec" , type=_lowerCAmelCase , default=8 , help="weight precision" ) group.add_argument("--aprec" , type=_lowerCAmelCase , default=8 , help="activation precision" ) group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" ) group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" ) group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" ) group.add_argument("--quant-disable-keyword" , type=_lowerCAmelCase , nargs="+" , help="disable quantizers by keyword" ) group.add_argument("--quant-disable-layer-module" , type=_lowerCAmelCase , help="disable quantizers by keyword under layer." ) group.add_argument("--quant-enable-layer-module" , type=_lowerCAmelCase , help="enable quantizers by keyword under layer" ) group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" ) group.add_argument("--percentile" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="percentile for PercentileCalibrator" ) group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" ) group.add_argument("--clip-gelu" , metavar="N" , type=_lowerCAmelCase , help="clip gelu output maximum value to N" ) group.add_argument( "--recalibrate-weights" , action="store_true" , help=( "recalibrate weight amaxes by taking the max of the weights." " amaxes will be computed with the current quantization granularity (axis)." 
) , ) def __snake_case ( _lowerCAmelCase : Dict ) -> Union[str, Any]: if args.calibrator == "max": A_ : str = "max" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("Specify --percentile when using percentile calibrator" ) A_ : Optional[int] = "histogram" elif args.calibrator == "mse": A_ : Dict = "histogram" else: raise ValueError(f"Invalid calibrator {args.calibrator}" ) A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=_lowerCAmelCase ) A_ : Union[str, Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_lowerCAmelCase ) quant_nn.QuantLinear.set_default_quant_desc_weight(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : str=False ) -> List[Any]: logger.info("Configuring Model for Quantization" ) logger.info(f"using quantization package {pytorch_quantization.__file__}" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_lowerCAmelCase , ["embeddings"] , which="weight" , _disabled=_lowerCAmelCase ) if args.quant_disable: set_quantizer_by_name(_lowerCAmelCase , [""] , _disabled=_lowerCAmelCase ) if args.quant_disable_keyword: set_quantizer_by_name(_lowerCAmelCase , args.quant_disable_keyword , _disabled=_lowerCAmelCase ) if args.quant_disable_layer_module: set_quantizer_by_name(_lowerCAmelCase , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=_lowerCAmelCase ) if args.quant_enable_layer_module: set_quantizer_by_name(_lowerCAmelCase , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=_lowerCAmelCase ) if args.recalibrate_weights: recalibrate_weights(_lowerCAmelCase ) if args.fuse_qkv: fuse_qkv(_lowerCAmelCase , _lowerCAmelCase ) if args.clip_gelu: clip_gelu(_lowerCAmelCase , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Dict ) -> Optional[int]: logger.info("Enabling Calibration" ) for name, module in model.named_modules(): if name.endswith("_quantizer" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f"{name:80}: {module}" ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] ) -> Any: logger.info("Loading calibrated amax" ) for name, module in model.named_modules(): if name.endswith("_quantizer" ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("percentile" , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ) -> List[Any]: def fusea(_lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ): for mod in [qq, qk, qv]: if not hasattr(_lowerCAmelCase , "_amax" ): print(" WARNING: NO AMAX BUFFER" ) return A_ : List[str] = qq._amax.detach().item() A_ : str = qk._amax.detach().item() A_ : Dict = qv._amax.detach().item() A_ : Union[str, Any] = max(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) qq._amax.fill_(_lowerCAmelCase ) qk._amax.fill_(_lowerCAmelCase ) qv._amax.fill_(_lowerCAmelCase ) logger.info(f" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" ) for name, mod in model.named_modules(): if name.endswith(".attention.self" 
): logger.info(f"FUSE_QKV: {name:{name_width}}" ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ) -> Union[str, Any]: for name, mod in model.named_modules(): if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ): A_ : Union[str, Any] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_lowerCAmelCase ) A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item() logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" ) def __snake_case ( _lowerCAmelCase : Tuple ) -> Tuple: for name, mod in model.named_modules(): if hasattr(_lowerCAmelCase , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None: A_ : Tuple = mod.weight.shape[0] A_ : Optional[int] = mod._weight_quantizer._amax.detach() A_ : Dict = torch.ones(_lowerCAmelCase , dtype=amax.dtype , device=amax.device ) * amax print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}" ) def __snake_case ( _lowerCAmelCase : Optional[Any] ) -> int: for name, mod in model.named_modules(): if hasattr(_lowerCAmelCase , "_weight_quantizer" ): if not hasattr(mod.weight_quantizer , "_amax" ): print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) A_ : Tuple = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) A_ : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set A_ : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowerCAmelCase , keepdims=_lowerCAmelCase ).detach() logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" ) A_ : List[str] = amax def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any=25 , _lowerCAmelCase : Union[str, Any]=180 , _lowerCAmelCase : Dict=None ) -> Optional[Any]: if ignore is None: A_ : Tuple = [] elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ): A_ : List[Any] = [ignore] A_ : Any = 0 for name, mod in model.named_modules(): if not hasattr(_lowerCAmelCase , "weight" ): continue A_ : Union[str, Any] = max(_lowerCAmelCase , len(_lowerCAmelCase ) ) for name, mod in model.named_modules(): A_ : Any = getattr(_lowerCAmelCase , "_input_quantizer" , _lowerCAmelCase ) A_ : Union[str, Any] = getattr(_lowerCAmelCase , "_weight_quantizer" , _lowerCAmelCase ) if not hasattr(_lowerCAmelCase , "weight" ): continue if type(_lowerCAmelCase ) in ignore: continue if [True for s in ignore if type(_lowerCAmelCase ) is str and s in name]: continue A_ : int = f"Act:{input_q.extra_repr()}" A_ : Optional[int] = f"Wgt:{weight_q.extra_repr()}" A_ : List[Any] = f"{name:{name_width}} {act_str} {wgt_str}" if len(_lowerCAmelCase ) <= line_width: logger.info(_lowerCAmelCase ) else: logger.info(f"{name:{name_width}} {act_str}" ) logger.info(f"{' ':{name_width}} {wgt_str}" ) def __snake_case ( _lowerCAmelCase : Optional[Any] ) -> str: A_ : Any = 0 for name, mod in model.named_modules(): if isinstance(_lowerCAmelCase , pytorch_quantization.nn.TensorQuantizer ): print(f"{name:80} {mod}" ) count += 1 print(f"{count} TensorQuantizers found in model" ) def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , 
_lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> Any: A_ : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if quantizer_mod is not None: assert hasattr(_lowerCAmelCase , _lowerCAmelCase ) setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: logger.warning(f"{name} has no {quantizer}" ) def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int]="both" , **_lowerCAmelCase : List[str] ) -> str: A_ : int = f"Warning: changing {which} quantizers of {name:{qname_width}}" for k, v in kwargs.items(): s += f" {k}={v}" if which in ["input", "both"]: set_quantizer(_lowerCAmelCase , _lowerCAmelCase , "_input_quantizer" , _lowerCAmelCase , _lowerCAmelCase ) if which in ["weight", "both"]: set_quantizer(_lowerCAmelCase , _lowerCAmelCase , "_weight_quantizer" , _lowerCAmelCase , _lowerCAmelCase ) logger.info(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , **_lowerCAmelCase : Tuple ) -> Any: for name, mod in model.named_modules(): if hasattr(_lowerCAmelCase , "_input_quantizer" ) or hasattr(_lowerCAmelCase , "_weight_quantizer" ): for n in names: if re.search(_lowerCAmelCase , _lowerCAmelCase ): set_quantizers(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ) elif name.endswith("_quantizer" ): for n in names: if re.search(_lowerCAmelCase , _lowerCAmelCase ): A_ : Optional[int] = f"Warning: changing {name:{name_width}}" for k, v in kwargs.items(): s += f" {k}={v}" setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) logger.info(_lowerCAmelCase )
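# A minimal calibration-loop sketch. Assumption: the helpers above carry their
# upstream quant_trainer names (set_default_quantizers, configure_model,
# enable_calibration, finish_calibration); `model`, `args`, and `calib_loader`
# are supplied by the caller. This is an illustration, not the library's API.
def run_calibration(model, args, calib_loader):
    set_default_quantizers(args)              # choose calibrator + QuantDescriptors first
    configure_model(model, args, calib=True)  # apply the disable/enable rules for calibration
    enable_calibration(model)                 # quantizers collect statistics, no fake-quant yet
    with torch.no_grad():
        for batch in calib_loader:
            model(**batch)                    # forward passes feed the calibrators
    finish_calibration(model, args)           # load amax values and re-enable quantization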
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets _lowerCAmelCase : List[Any] = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' _lowerCAmelCase : Union[str, Any] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' _lowerCAmelCase : Optional[Any] = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :List[Any] , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' A_ : List[str] = len(references[0] ) if any(len(snake_case ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A_ : int = [[refs[i] for refs in references] for i in range(snake_case )] A_ : Optional[Any] = TER( normalized=snake_case , no_punct=snake_case , asian_support=snake_case , case_sensitive=snake_case , ) A_ : List[Any] = sb_ter.corpus_score(snake_case , snake_case ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
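# Shape-convention note: unlike raw sacrebleu, this metric takes the references
# grouped per prediction and transposes them into sacrebleu's per-stream layout
# (the list comprehension in the compute method above), so every prediction must
# carry the same number of references. Illustration:
predictions = ["hello there", "general kenobi"]
references = [["hello there", "hi there"], ["general kenobi", "general kenobi"]]
# transposed internally to:
#   [["hello there", "general kenobi"], ["hi there", "general kenobi"]]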
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
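# Quick usage sketch (run inside the transformers package, since the imports
# above are relative): build a config and inspect the ONNX export contract.
config = Data2VecVisionConfig(image_size=384)         # any field can be overridden
onnx_config = Data2VecVisionOnnxConfig(config)
print(onnx_config.inputs)                # OrderedDict([('pixel_values', {0: 'batch', ...})])
print(onnx_config.atol_for_validation)   # 1e-04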
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs; used for the small base cases
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # inside the strip each point only needs to be checked against 6 neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
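# Smoke test (a sketch): compare the divide-and-conquer result against a plain
# O(n^2) brute-force scan on random input. Printed rather than asserted, since
# this check is an illustration, not part of the original module.
import random

def brute_force_closest(pts):
    return min(
        euclidean_distance_sqr(p, q) for i, p in enumerate(pts) for q in pts[i + 1 :]
    ) ** 0.5

if __name__ == "__main__":
    sample = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(200)]
    print(closest_pair_of_points(sample, len(sample)), brute_force_closest(sample))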
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowerCAmelCase : int = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : Union[tf.Tensor, np.ndarray] ) -> List[int]: if isinstance(_lowerCAmelCase , np.ndarray ): return list(tensor.shape ) A_ : Any = tf.shape(_lowerCAmelCase ) if tensor.shape == tf.TensorShape(_lowerCAmelCase ): return dynamic A_ : Dict = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_lowerCAmelCase )] def __snake_case ( _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[str] = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9 , axis=_lowerCAmelCase , name=_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any]=1e-5 , _lowerCAmelCase : Any=-1 ) -> str: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_lowerCAmelCase , _lowerCAmelCase ): raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." ) # Get mean and variance on the axis to be normalized A_ , A_ : Union[str, Any] = tf.nn.moments(_lowerCAmelCase , axes=[axis] , keepdims=_lowerCAmelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis A_ : str = [1] * inputs.shape.rank A_ : Tuple = shape_list(_lowerCAmelCase )[axis] A_ : Tuple = tf.reshape(_lowerCAmelCase , _lowerCAmelCase ) A_ : int = tf.reshape(_lowerCAmelCase , _lowerCAmelCase ) # Compute layer normalization using the batch_normalization # function. A_ : Tuple = tf.nn.batch_normalization( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , offset=_lowerCAmelCase , scale=_lowerCAmelCase , variance_epsilon=_lowerCAmelCase , ) return outputs def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Any=-1 ) -> int: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input A_ : str = tf.shape(_lowerCAmelCase ) A_ : Any = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) A_ : List[Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(_lowerCAmelCase , _lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : tf.Tensor ) -> tf.Tensor: if not isinstance(_lowerCAmelCase , tf.Tensor ): A_ : Any = tf.convert_to_tensor(_lowerCAmelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: A_ : str = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: A_ : List[str] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) A_ : Optional[Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __snake_case ( _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : int , _lowerCAmelCase : str = "input_ids" ) -> None: tf.debugging.assert_less( _lowerCAmelCase , tf.cast(_lowerCAmelCase , dtype=tensor.dtype ) , message=( f"The maximum value of {tensor_name} ({tf.math.reduce_max(_lowerCAmelCase )}) must be smaller than the embedding " f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." ) , ) def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ) -> Tuple: A_ : str = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. A_ : int = [x for x in data if len(_lowerCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) A_ : Dict = np.asarray(_lowerCAmelCase ) A_ : Dict = 1 A_ : str = np.array_split(_lowerCAmelCase , _lowerCAmelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 A_ : str = np.array_split(_lowerCAmelCase , _lowerCAmelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_lowerCAmelCase ): A_ : str = chunk_data else: A_ : str = data def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ) -> Optional[Any]: if name in group.attrs: A_ : List[str] = [n.decode("utf8" ) if hasattr(_lowerCAmelCase , "decode" ) else n for n in group.attrs[name]] else: A_ : Any = [] A_ : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("utf8" ) if hasattr(_lowerCAmelCase , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] ) chunk_id += 1 return data def __snake_case ( _lowerCAmelCase : Any ) -> List[str]: def _expand_single_ad_tensor(_lowerCAmelCase : str ): if isinstance(_lowerCAmelCase , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_lowerCAmelCase , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , _lowerCAmelCase )
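# Usage notes (assumption: the helpers above keep their upstream
# transformers.tf_utils names shape_list, stable_softmax, and flatten):
import tensorflow as tf

x = tf.zeros((2, 3, 4))
# shape_list(x)              -> [2, 3, 4]; unknown dims are filled from tf.shape at runtime
# stable_softmax(x, axis=-1) -> tf.nn.softmax with a 1e-9 shift (works around an XLA-on-CPU bug)
# flatten(x, start_dim=1)    -> shape (2, 12), mirroring torch.flatten semantics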
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : """simple docstring""" def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ): '''simple docstring''' A_ : Tuple = parent A_ : int = batch_size A_ : List[str] = image_size A_ : List[Any] = patch_size A_ : Optional[Any] = num_channels A_ : List[Any] = is_training A_ : Tuple = use_labels A_ : Union[str, Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Any = num_attention_heads A_ : List[str] = intermediate_size A_ : Optional[int] = hidden_act A_ : List[str] = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Any = type_sequence_label_size A_ : List[str] = initializer_range A_ : Dict = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Optional[int] = (image_size // patch_size) ** 2 A_ : List[str] = num_patches + 1 def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Tuple = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ): '''simple docstring''' A_ : Optional[Any] = ViTMSNModel(config=snake_case ) model.to(snake_case ) model.eval() A_ : int = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ): '''simple docstring''' A_ : Dict = self.type_sequence_label_size A_ : Tuple = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Union[str, Any] = model(snake_case , 
labels=snake_case ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : Union[str, Any] = 1 A_ : int = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Optional[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : Optional[int] = config_and_inputs A_ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Tuple = ViTMSNModelTester(self ) A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(snake_case ) A_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[str] = [*signature.parameters.keys()] A_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __snake_case ( ) -> Optional[Any]: A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self :str ): '''simple 
docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' torch.manual_seed(2 ) A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case ) A_ : List[str] = self.default_image_processor A_ : int = prepare_img() A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case ) # forward pass with torch.no_grad(): A_ : Optional[int] = model(**snake_case ) # verify the logits A_ : List[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification _lowerCAmelCase : Optional[Any] = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co _lowerCAmelCase : Tuple = '''main''' # Default branch name _lowerCAmelCase : Tuple = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) _lowerCAmelCase : int = '''aaaaaaa''' # This commit does not exist, so we should 404. _lowerCAmelCase : List[str] = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes _lowerCAmelCase : str = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def __snake_case ( ) -> List[Any]: print("Welcome!" ) yield print("Bye!" ) @contextlib.contextmanager def __snake_case ( ) -> Union[str, Any]: print("Bonjour!" ) yield print("Au revoir!" ) class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' assert transformers.__spec__ is not None assert importlib.util.find_spec("transformers" ) is not None class __magic_name__ ( unittest.TestCase ): """simple docstring""" @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :List[str] ): '''simple docstring''' with ContextManagers([] ): print("Transformers are awesome!" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :str ): '''simple docstring''' with ContextManagers([context_en()] ): print("Transformers are awesome!" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict ): '''simple docstring''' with ContextManagers([context_fr(), context_en()] ): print("Transformers are awesome!" 
) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" ) @require_torch def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' self.assertEqual(find_labels(snake_case ) , ["labels"] ) self.assertEqual(find_labels(snake_case ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(snake_case ) , ["start_positions", "end_positions"] ) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" pass self.assertEqual(find_labels(snake_case ) , ["labels"] ) @require_tf def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' self.assertEqual(find_labels(snake_case ) , ["labels"] ) self.assertEqual(find_labels(snake_case ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(snake_case ) , ["start_positions", "end_positions"] ) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" pass self.assertEqual(find_labels(snake_case ) , ["labels"] ) @require_flax def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' self.assertEqual(find_labels(snake_case ) , [] ) self.assertEqual(find_labels(snake_case ) , [] ) self.assertEqual(find_labels(snake_case ) , [] ) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" pass self.assertEqual(find_labels(snake_case ) , [] )
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = (DDPMScheduler,) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ): '''simple docstring''' A_ : Dict = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**snake_case ) return config def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case , beta_end=snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.check_over_configs(thresholding=snake_case ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Tuple = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : List[str] = scheduler_class(**snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : int = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : int = scheduler_class(**snake_case ) A_ : Tuple = len(snake_case ) A_ : List[str] = self.dummy_model() A_ : Optional[Any] = self.dummy_sample_deter A_ : List[str] = torch.manual_seed(0 ) for t in reversed(range(snake_case ) ): # 1. predict noise residual A_ : Tuple = model(snake_case , snake_case ) # 2. 
predict previous mean of sample x_t-1 A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : Optional[int] = pred_prev_sample A_ : Tuple = torch.sum(torch.abs(snake_case ) ) A_ : str = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Optional[int] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config(prediction_type="v_prediction" ) A_ : List[str] = scheduler_class(**snake_case ) A_ : int = len(snake_case ) A_ : Dict = self.dummy_model() A_ : str = self.dummy_sample_deter A_ : Any = torch.manual_seed(0 ) for t in reversed(range(snake_case ) ): # 1. predict noise residual A_ : Optional[int] = model(snake_case , snake_case ) # 2. predict previous mean of sample x_t-1 A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : List[str] = pred_prev_sample A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) ) A_ : List[str] = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config() A_ : Dict = scheduler_class(**snake_case ) A_ : Optional[int] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case ) A_ : Optional[int] = scheduler.timesteps for i, timestep in enumerate(snake_case ): if i == len(snake_case ) - 1: A_ : str = -1 else: A_ : List[str] = timesteps[i + 1] A_ : Optional[int] = scheduler.previous_timestep(snake_case ) A_ : List[str] = prev_t.item() self.assertEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Optional[Any] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config() A_ : Tuple = scheduler_class(**snake_case ) A_ : List[str] = [100, 87, 50, 51, 0] with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Any = self.scheduler_classes[0] A_ : Union[str, Any] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**snake_case ) A_ : Union[str, Any] = [100, 87, 50, 1, 0] A_ : Optional[int] = len(snake_case ) with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Union[str, Any] = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**snake_case ) A_ : Optional[int] = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=snake_case )
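# Usage sketch of the custom-timesteps API exercised by the tests above:
from diffusers import DDPMScheduler

sched = DDPMScheduler(num_train_timesteps=1_000)
sched.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
print(sched.timesteps)                              # tensor([100,  87,  50,   1,   0])
# Passing both num_inference_steps and timesteps raises ValueError, as asserted above.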
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __snake_case ( _lowerCAmelCase : List[Any] ) -> Tuple: A_ : List[str] = checkpoints.load_tax_checkpoint(_lowerCAmelCase ) A_ : Dict = flatten_dict(_lowerCAmelCase ) return flax_params def __snake_case ( _lowerCAmelCase : List[Any] ) -> List[str]: A_ : List[Any] = {} A_ : int = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } A_ : List[Any] = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key A_ : str = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): A_ : Optional[Any] = new_key.replace(_lowerCAmelCase , _lowerCAmelCase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): A_ : Any = new_key.replace(_lowerCAmelCase , _lowerCAmelCase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number A_ : Dict = re.sub(r"layers_(\d+)" , r"layer.\1" , _lowerCAmelCase ) A_ : List[Any] = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number A_ : List[Any] = re.sub(r"layers_(\d+)" , r"layer.\1" , _lowerCAmelCase ) A_ : List[str] = flax_dict[key] A_ : Dict = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): A_ : List[str] = torch.from_numpy(converted_dict[key].T ) else: A_ : Optional[int] = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : int=False , _lowerCAmelCase : str=False ) -> List[Any]: A_ : int = get_flax_param(_lowerCAmelCase ) if not use_large: A_ : Union[str, Any] = PixaStructVisionConfig() A_ : Optional[Any] = PixaStructTextConfig() else: A_ : int = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) A_ : List[Any] = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) A_ : Optional[int] = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_lowerCAmelCase ) A_ : Dict = PixaStructForConditionalGeneration(_lowerCAmelCase ) A_ : Dict = rename_and_convert_flax_params(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) A_ : List[str] = 
AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) A_ : List[Any] = PixaStructImageProcessor() A_ : str = PixaStructProcessor(image_processor=_lowerCAmelCase , tokenizer=_lowerCAmelCase ) if use_large: A_ : List[Any] = 4096 A_ : Union[str, Any] = True # mkdir if needed os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) print("Model saved in {}".format(_lowerCAmelCase ) ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') _lowerCAmelCase : Any = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
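# Example invocation of this conversion script (a sketch; the script filename and
# paths are placeholders, the flags are the ones defined above):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large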
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : int = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]: for attribute in key.split("." ): A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: A_ : Tuple = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Any = value elif weight_type == "bias": A_ : str = value else: A_ : Any = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]: A_ : Optional[Any] = [] A_ : Any = fairseq_model.state_dict() A_ : Union[str, Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ : str = None for name, value in fairseq_dict.items(): A_ : Tuple = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) A_ : Optional[Any] = True elif name.split("." )[0] == "proj": A_ : Dict = fairseq_model.proj A_ : List[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ : int = True if "*" in mapped_key: A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." 
)[-2] A_ : int = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: A_ : List[Any] = "weight_g" elif "weight_v" in name: A_ : List[Any] = "weight_v" elif "bias" in name: A_ : Dict = "bias" elif "weight" in name: A_ : List[Any] = "weight" else: A_ : Dict = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: A_ : Any = full_name.split("conv_layers." )[-1] A_ : Optional[int] = name.split("." ) A_ : Optional[Any] = int(items[0] ) A_ : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A_ : List[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A_ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) A_ : List[Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) A_ : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str: A_ , A_ : List[str] = emb.weight.shape A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) A_ : List[Any] = emb.weight.data return lin_layer def __snake_case ( _lowerCAmelCase : str ) -> Tuple: with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: A_ : int = f.readlines() A_ : Dict = [line.split(" " )[0] for line in lines] A_ : Tuple = len(_lowerCAmelCase ) A_ : Union[str, Any] = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple: A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) A_ : str = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) A_ : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ : Union[str, Any] = model[0].eval() # set weights for wav2vec2 encoder A_ : Tuple = WavaVecaModel(_lowerCAmelCase ) A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase ) A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) A_ : Optional[Any] = False # add projection layer A_ : Optional[Any] = nn.Parameter(projection_layer.weight ) A_ : int = nn.Parameter(projection_layer.bias ) A_ : str = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) A_ : Optional[int] = hf_wavavec.config.to_dict() A_ : int = tokenizer.pad_token_id A_ : List[str] = tokenizer.bos_token_id A_ : List[str] = tokenizer.eos_token_id A_ : List[str] = "speech_to_text_2" A_ : Tuple = "wav2vec2" A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, 
help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''unispeech''' def __init__( self :Any , snake_case :str=32 , snake_case :Optional[Any]=768 , snake_case :int=12 , snake_case :List[str]=12 , snake_case :str=3_072 , snake_case :Optional[int]="gelu" , snake_case :int=0.1 , snake_case :Any=0.1 , snake_case :List[Any]=0.1 , snake_case :Optional[int]=0.0 , snake_case :int=0.0 , snake_case :int=0.1 , snake_case :List[Any]=0.1 , snake_case :Dict=0.02 , snake_case :int=1e-5 , snake_case :str="group" , snake_case :Optional[int]="gelu" , snake_case :Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , snake_case :Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , snake_case :Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , snake_case :List[str]=False , snake_case :Union[str, Any]=128 , snake_case :List[str]=16 , snake_case :str=False , snake_case :int=True , snake_case :Any=0.05 , snake_case :Union[str, Any]=10 , snake_case :List[Any]=2 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :List[str]=0 , snake_case :List[str]=320 , snake_case :Tuple=2 , snake_case :List[str]=0.1 , snake_case :int=100 , snake_case :Tuple=256 , snake_case :Optional[Any]=256 , snake_case :Dict=0.1 , snake_case :Any="mean" , snake_case :Any=False , snake_case :Dict=False , snake_case :List[Any]=256 , snake_case :Optional[int]=80 , snake_case :Union[str, Any]=0 , snake_case :Tuple=1 , snake_case :Tuple=2 , snake_case :Union[str, Any]=0.5 , **snake_case :Tuple , ): '''simple docstring''' super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case ) A_ : str = hidden_size A_ : List[Any] = feat_extract_norm A_ : Any = feat_extract_activation A_ : int = list(snake_case ) A_ : Optional[Any] = list(snake_case ) A_ : Tuple = list(snake_case ) A_ : str = conv_bias A_ : str = num_conv_pos_embeddings A_ : Optional[Any] = num_conv_pos_embedding_groups A_ : List[Any] = len(self.conv_dim ) A_ : str = num_hidden_layers A_ : Union[str, Any] = intermediate_size A_ : str = hidden_act A_ : Optional[Any] = num_attention_heads A_ : str = hidden_dropout A_ : Optional[int] = attention_dropout A_ : Dict = activation_dropout A_ : int = feat_proj_dropout A_ : Optional[int] = final_dropout A_ : Optional[Any] = layerdrop A_ : Tuple = layer_norm_eps A_ : Any = initializer_range A_ : Dict = num_ctc_classes A_ : List[str] = vocab_size A_ : Union[str, Any] = do_stable_layer_norm A_ : Dict = use_weighted_layer_sum A_ : int = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : Optional[int] = apply_spec_augment A_ : int = mask_time_prob A_ : List[Any] = mask_time_length A_ : Optional[Any] = mask_time_min_masks A_ : List[Any] = mask_feature_prob A_ : List[Any] = mask_feature_length A_ : List[str] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations A_ : int = num_codevectors_per_group A_ : Union[str, Any] = num_codevector_groups A_ : str = contrastive_logits_temperature A_ : List[str] = feat_quantizer_dropout A_ : Optional[Any] = num_negatives A_ : Tuple = codevector_dim A_ : str = proj_codevector_dim A_ : Optional[int] = diversity_loss_weight # ctc loss A_ : Optional[int] = ctc_loss_reduction A_ : Dict = ctc_zero_infinity # pretraining loss A_ : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
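# --- Hedged usage sketch (not part of the original file) ---
# With the default conv strides (5, 2, 2, 2, 2, 2, 2), the feature extractor
# downsamples the raw waveform by 5 * 2**6 = 320 samples per output frame,
# which is exactly what the reduce() in the property above computes. Against
# the canonical upstream class this file mirrors:
from transformers import UniSpeechConfig  # assumed upstream equivalent

config = UniSpeechConfig()
assert config.inputs_to_logits_ratio == 320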
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : """simple docstring""" def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ): '''simple docstring''' A_ : str = parent A_ : str = batch_size A_ : str = seq_length A_ : Any = is_training A_ : Any = use_input_mask A_ : str = use_token_type_ids A_ : Tuple = use_labels A_ : Optional[Any] = vocab_size A_ : Dict = hidden_size A_ : str = num_hidden_layers A_ : Dict = num_attention_heads A_ : str = intermediate_size A_ : int = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : List[Any] = type_vocab_size A_ : Any = type_sequence_label_size A_ : Dict = initializer_range A_ : Any = num_labels A_ : Optional[int] = num_choices A_ : Optional[Any] = scope A_ : Any = range_bbox def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ : str = bbox[i, j, 3] A_ : Union[str, Any] = bbox[i, j, 1] A_ : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: A_ : Any = bbox[i, j, 2] A_ : Tuple = bbox[i, j, 0] A_ : int = t A_ : int = tf.convert_to_tensor(snake_case ) A_ : Any = None if self.use_input_mask: A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : str = None if self.use_token_type_ids: A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Dict = None A_ : List[Any] = None A_ : List[str] = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : str = ids_tensor([self.batch_size] , self.num_choices ) A_ : int = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ): '''simple docstring''' A_ : Any = TFLayoutLMModel(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) A_ : str = model(snake_case , snake_case , token_type_ids=snake_case ) A_ : List[Any] = model(snake_case , snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ): '''simple docstring''' A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Union[str, Any] = self.num_labels A_ : int = TFLayoutLMForSequenceClassification(config=snake_case ) A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.num_labels A_ : str = TFLayoutLMForTokenClassification(config=snake_case ) A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case ) A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Optional[Any] = { "input_ids": 
input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 10 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Tuple = TFLayoutLMModelTester(self ) A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def __snake_case ( ) -> Optional[Any]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 A_ : Union[str, Any] = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs() # forward pass A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the sequence output on [0, :3, :3] A_ : List[Any] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) ) # test the pooled output on [1, :3] A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs() # forward pass A_ : Dict = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar A_ : List[str] = outputs.loss A_ : Union[str, Any] = (2,) self.assertEqual(loss.shape , snake_case ) # test the shape of the logits A_ : Tuple = outputs.logits A_ : Tuple = (2, 2) self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) # test the shape of the logits A_ : Dict = outputs.logits A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) ) 
self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the shape of the logits A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , snake_case ) self.assertEqual(outputs.end_logits.shape , snake_case )
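# --- Hedged helper sketch (not from the original tests) ---
# LayoutLM expects every bounding box on a 0-1000 scale relative to the page
# size, which is why the integration inputs above top out at
# [1000, 1000, 1000, 1000]. A minimal normalizer:
def normalize_bbox(bbox, width, height):
    x0, y0, x1, y1 = bbox
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]

assert normalize_bbox([10, 20, 30, 40], width=200, height=400) == [50, 50, 150, 100]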
from sklearn.metrics import matthews_corrcoef import datasets _lowerCAmelCase : str = ''' Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] ''' _lowerCAmelCase : Any = ''' Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 ''' _lowerCAmelCase : Tuple = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html" ] , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :str , snake_case :Optional[Any] , snake_case :List[Any]=None ): '''simple docstring''' return { "matthews_correlation": float(matthews_corrcoef(snake_case , snake_case , sample_weight=snake_case ) ), }
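# --- Hedged formula sketch (not part of the metric file) ---
# For the binary case, sklearn's matthews_corrcoef reduces to
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)),
# with 0 returned when the denominator vanishes.
from math import sqrt

def binary_mcc(tp: int, tn: int, fp: int, fn: int) -> float:
    denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return 0.0 if denom == 0 else (tp * tn - fp * fn) / denom

assert binary_mcc(tp=5, tn=5, fp=0, fn=0) == 1.0   # perfect prediction
assert binary_mcc(tp=0, tn=0, fp=5, fn=5) == -1.0  # fully inverted prediction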
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore _lowerCAmelCase : Optional[int] = ''' Human: <<task>> Assistant: ''' _lowerCAmelCase : int = '''huggingface-tools/default-prompts''' _lowerCAmelCase : Any = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''} def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict="run" ) -> List[Any]: if prompt_or_repo_id is None: A_ : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , _lowerCAmelCase ) is not None: return prompt_or_repo_id A_ : Optional[Any] = cached_file( _lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: return f.read()
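# --- Hedged behavior sketch (not part of the original file) ---
# The whitespace test above is the whole dispatch rule: any prompt containing
# whitespace is treated as an inline template and returned verbatim, while a
# bare repo id triggers a hub download. A self-contained restatement:
import re

def is_inline_prompt(prompt_or_repo_id: str) -> bool:
    return re.search(r"\s", prompt_or_repo_id) is not None

assert is_inline_prompt("Answer the question: <<task>>")
assert not is_inline_prompt("huggingface-tools/default-prompts")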
def __snake_case ( _lowerCAmelCase : list ) -> int: if not grid or not grid[0]: raise TypeError("The grid does not contain the appropriate information" ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] A_ : Optional[int] = grid[0] for row_n in range(1 , len(_lowerCAmelCase ) ): A_ : Optional[Any] = grid[row_n] A_ : List[Any] = fill_row(_lowerCAmelCase , _lowerCAmelCase ) A_ : Dict = grid[row_n] return grid[-1][-1] def __snake_case ( _lowerCAmelCase : list , _lowerCAmelCase : list ) -> list: current_row[0] += row_above[0] for cell_n in range(1 , len(_lowerCAmelCase ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
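# --- Hedged usage sketch (not part of the original file) ---
# Upstream these two functions are `min_path_sum` and `fill_row`; with those
# names restored, the classic 3x3 grid has a cheapest top-left -> bottom-right
# path 1 -> 3 -> 1 -> 1 -> 1 = 7. Note that the function mutates `grid` in place.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert min_path_sum(grid) == 7  # hypothetical restored name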
def __snake_case ( _lowerCAmelCase : list ) -> list: if len(_lowerCAmelCase ) <= 1: return [tuple(_lowerCAmelCase )] A_ : Tuple = [] def generate(_lowerCAmelCase : int , _lowerCAmelCase : list ): A_ : List[str] = [0] * n res.append(tuple(_lowerCAmelCase ) ) A_ : int = 0 while i < n: if c[i] < i: if i % 2 == 0: A_ , A_ : str = arr[i], arr[0] else: A_ , A_ : List[str] = arr[i], arr[c[i]] res.append(tuple(_lowerCAmelCase ) ) c[i] += 1 A_ : Tuple = 0 else: A_ : Dict = 0 i += 1 generate(len(_lowerCAmelCase ) , _lowerCAmelCase ) return res if __name__ == "__main__": _lowerCAmelCase : str = input('''Enter numbers separated by a comma:\n''').strip() _lowerCAmelCase : str = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
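# --- Hedged usage sketch (not part of the original file) ---
# Heap's algorithm enumerates all n! orderings, so three elements yield six
# distinct tuples, the first being the unmodified input (using the `heaps`
# name that the __main__ block above already references):
perms = heaps([1, 2, 3])
assert len(perms) == 6
assert perms[0] == (1, 2, 3)
assert len(set(perms)) == 6  # no duplicates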
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''timm_backbone''' def __init__( self :List[str] , snake_case :Union[str, Any]=None , snake_case :Any=3 , snake_case :Union[str, Any]=True , snake_case :Any=True , snake_case :Optional[Any]=None , **snake_case :List[Any] , ): '''simple docstring''' super().__init__(**snake_case ) A_ : Optional[Any] = backbone A_ : Dict = num_channels A_ : str = features_only A_ : Tuple = use_pretrained_backbone A_ : List[Any] = True A_ : Optional[Any] = out_indices if out_indices is not None else (-1,)
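# --- Hedged usage sketch (not part of the original file) ---
# Against the canonical upstream class this config mirrors: choose a timm
# architecture and which feature stages to expose; the default set here is
# the last stage only, i.e. out_indices == (-1,).
from transformers import TimmBackboneConfig  # assumed upstream equivalent

config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
assert config.out_indices == (1, 2, 3, 4)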
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : List[Any] = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase : Any = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = RobertaTokenizer def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ): '''simple docstring''' super().__init__( snake_case , snake_case 
, tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , ) A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**snake_case ) A_ : Optional[int] = add_prefix_space A_ : Optional[int] = "post_processor" A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case ) if tokenizer_component_instance: A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : List[Any] = tuple(state["sep"] ) if "cls" in state: A_ : Optional[Any] = tuple(state["cls"] ) A_ : Tuple = False if state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : List[Any] = add_prefix_space A_ : Optional[int] = True if state.get("trim_offsets" , snake_case ) != trim_offsets: A_ : List[str] = trim_offsets A_ : Any = True if changes_to_apply: A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) ) A_ : Any = component_class(**snake_case ) setattr(self.backend_tokenizer , snake_case , snake_case ) @property def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Dict ): '''simple docstring''' A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value A_ : Any = value def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ): '''simple docstring''' A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ): '''simple docstring''' A_ : Any = [self.sep_token_id] A_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
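# --- Hedged usage sketch (not part of the original file) ---
# Against the canonical upstream class (needs network access or a local cache
# for the checkpoint): a single sequence is wrapped as <s> ... </s>, exactly
# as build_inputs_with_special_tokens above encodes.
from transformers import RobertaTokenizerFast  # assumed upstream equivalent

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
ids = tok("Hello world")["input_ids"]
assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id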
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowerCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__) @dataclass class __magic_name__ ( datasets.BuilderConfig ): """simple docstring""" __UpperCamelCase = None __UpperCamelCase = "utf-8" __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = True # deprecated __UpperCamelCase = None # deprecated __UpperCamelCase = 10 << 20 # 10MB __UpperCamelCase = None class __magic_name__ ( datasets.ArrowBasedBuilder ): """simple docstring""" __UpperCamelCase = JsonConfig def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' if self.config.block_size is not None: logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" ) A_ : List[str] = self.config.block_size if self.config.use_threads is not True: logger.warning( "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." ) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" ) return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Union[str, Any] ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" ) A_ : Dict = dl_manager.download_and_extract(self.config.data_files ) if isinstance(snake_case , (str, list, tuple) ): A_ : int = data_files if isinstance(snake_case , snake_case ): A_ : List[Any] = [files] A_ : int = [dl_manager.iter_files(snake_case ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] A_ : List[str] = [] for split_name, files in data_files.items(): if isinstance(snake_case , snake_case ): A_ : Optional[int] = [files] A_ : Dict = [dl_manager.iter_files(snake_case ) for file in files] splits.append(datasets.SplitGenerator(name=snake_case , gen_kwargs={"files": files} ) ) return splits def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[Any] = self.config.features.arrow_schema.field(snake_case ).type A_ : int = pa_table.append_column(snake_case , pa.array([None] * len(snake_case ) , type=snake_case ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : Union[str, Any] = table_cast(snake_case , self.config.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Optional[int] ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[int] = json.load(snake_case ) # We keep only the field we are interested in A_ : Tuple = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(snake_case , (list, tuple) ): A_ : List[Any] 
= set().union(*[row.keys() for row in dataset] ) A_ : List[Any] = {col: [row.get(snake_case ) for row in dataset] for col in keys} else: A_ : Optional[Any] = dataset A_ : List[Any] = pa.Table.from_pydict(snake_case ) yield file_idx, self._cast_table(snake_case ) # If the file has one json object per line else: with open(snake_case , "rb" ) as f: A_ : List[Any] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : List[Any] = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: A_ : str = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(snake_case ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Any = batch.decode(self.config.encoding , errors=snake_case ).encode("utf-8" ) try: while True: try: A_ : int = paj.read_json( io.BytesIO(snake_case ) , read_options=paj.ReadOptions(block_size=snake_case ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(snake_case , pa.ArrowInvalid ) and "straddling" not in str(snake_case ) or block_size > len(snake_case ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"Batch of {len(snake_case )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : List[Any] = json.load(snake_case ) except json.JSONDecodeError: logger.error(f"Failed to read file '{file}' with error {type(snake_case )}: {e}" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(snake_case , snake_case ): # list is the only sequence type supported in JSON try: A_ : List[str] = set().union(*[row.keys() for row in dataset] ) A_ : Dict = {col: [row.get(snake_case ) for row in dataset] for col in keys} A_ : Tuple = pa.Table.from_pydict(snake_case ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"Failed to read file '{file}' with error {type(snake_case )}: {e}" ) raise ValueError(f"Not able to read records in the JSON file at {file}." ) from None yield file_idx, self._cast_table(snake_case ) break else: logger.error(f"Failed to read file '{file}' with error {type(snake_case )}: {e}" ) raise ValueError( f"Not able to read records in the JSON file at {file}. " f"You should probably indicate the field of the JSON file containing your records. " f"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. " f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(snake_case ) batch_idx += 1
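# --- Hedged usage sketch (not part of the original file) ---
# Exercising this builder through the public API (assuming `datasets` is
# installed): newline-delimited JSON is parsed in `chunksize`-byte batches,
# and passing `field=...` would instead select a nested list of records.
import json, os, tempfile
from datasets import load_dataset

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "data.jsonl")
    with open(path, "w", encoding="utf-8") as f:
        for row in ({"a": 1}, {"a": 2}):
            f.write(json.dumps(row) + "\n")
    ds = load_dataset("json", data_files=path, split="train")
    assert ds["a"] == [1, 2]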
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCAmelCase : int = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' _lowerCAmelCase : Tuple = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' _lowerCAmelCase : int = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case ) }
from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def __snake_case ( _lowerCAmelCase : str = "laptop" ) -> DataFrame: A_ : str = f"https://www.amazon.in/laptop/s?k={product}" A_ : Union[str, Any] = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36", "Accept-Language": "en-US, en;q=0.5", } A_ : Tuple = BeautifulSoup(requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).text ) # Initialize a Pandas dataframe with the column titles A_ : List[Any] = DataFrame( columns=[ "Product Title", "Product Link", "Current Price of the product", "Product Rating", "MRP of the product", "Discount", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ): try: A_ : Dict = item.ha.text A_ : Optional[int] = "https://www.amazon.in/" + item.ha.a["href"] A_ : List[Any] = item.find("span" , attrs={"class": "a-offscreen"} ).text try: A_ : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text except AttributeError: A_ : List[Any] = "Not available" try: A_ : Any = ( "₹" + item.find( "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1] ) except AttributeError: A_ : List[Any] = "" try: A_ : List[Any] = float( ( ( float(product_mrp.strip("₹" ).replace("," , "" ) ) - float(product_price.strip("₹" ).replace("," , "" ) ) ) / float(product_mrp.strip("₹" ).replace("," , "" ) ) ) * 100 ) except ValueError: A_ : List[Any] = float("nan" ) except AttributeError: pass A_ : List[Any] = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] A_ : Any = " " A_ : Any = " " data_frame.index += 1 return data_frame if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = '''headphones''' get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]: A_ : Tuple = tmp_path / "cache" A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str: A_ : List[Any] = tmp_path / "cache" A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : int = features.copy() if features else default_expected_features A_ : str = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]: A_ : Dict = tmp_path / "cache" A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]: if issubclass(_lowerCAmelCase , _lowerCAmelCase ): A_ : int = parquet_path elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): A_ : Optional[int] = [parquet_path] A_ : Optional[int] = tmp_path / "cache" A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) 
for split in splits: A_ : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]: A_ : Optional[Any] = tmp_path / "cache" A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ : Union[str, Any] = ParquetDatasetReader( {"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple: A_ : Optional[Any] = tmp_path / "cache" A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : List[str] = features.copy() if features else default_expected_features A_ : Tuple = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]: if split: A_ : Any = {split: parquet_path} else: A_ : Optional[Any] = "train" A_ : str = {"train": parquet_path, "test": parquet_path} A_ : Any = tmp_path / "cache" A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict: A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) A_ : Dict = pf.read() assert dataset.data.table == output_table def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]: A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" ) A_ : int = {"image": [image_path]} A_ : Optional[Any] = Features({"image": Image()} ) A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase ) A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ 
(Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any: assert get_writer_batch_size(_lowerCAmelCase ) == expected
from ...configuration_utils import PretrainedConfig class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''bert-generation''' def __init__( self :Optional[Any] , snake_case :Optional[Any]=50_358 , snake_case :Tuple=1_024 , snake_case :Optional[Any]=24 , snake_case :Union[str, Any]=16 , snake_case :Optional[Any]=4_096 , snake_case :Dict="gelu" , snake_case :Optional[Any]=0.1 , snake_case :List[str]=0.1 , snake_case :List[str]=512 , snake_case :Any=0.02 , snake_case :Any=1e-12 , snake_case :str=0 , snake_case :Tuple=2 , snake_case :Tuple=1 , snake_case :Optional[int]="absolute" , snake_case :Tuple=True , **snake_case :int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) A_ : Any = vocab_size A_ : Optional[int] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : Any = num_attention_heads A_ : str = hidden_act A_ : List[str] = intermediate_size A_ : List[Any] = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : List[Any] = initializer_range A_ : Dict = layer_norm_eps A_ : Optional[Any] = position_embedding_type A_ : Optional[Any] = use_cache
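# --- Illustrative sketch (not a dataset row): the class above is the
# bert-generation configuration (obfuscated here as __magic_name__); in
# transformers it is importable as BertGenerationConfig. Values are examples.
from transformers import BertGenerationConfig

cfg = BertGenerationConfig(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)
assert cfg.model_type == "bert-generation"
print(cfg.vocab_size)  # 50358 by default, per the __init__ signature above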
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int: with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f: A_ : Optional[int] = json.load(_lowerCAmelCase ) A_ : Union[str, Any] = {} A_ : Tuple = [] A_ : Optional[Any] = [] for key, info in class_info.items(): A_ : Tuple = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(_lowerCAmelCase ) ) A_ : Optional[Any] = thing_ids A_ : int = class_names return metadata class __magic_name__ ( unittest.TestCase ): """simple docstring""" def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ): '''simple docstring''' A_ : Tuple = parent A_ : List[str] = batch_size A_ : Optional[int] = num_channels A_ : Tuple = min_resolution A_ : List[Any] = max_resolution A_ : Union[str, Any] = do_resize A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size A_ : Tuple = do_normalize A_ : List[str] = image_mean A_ : List[Any] = image_std A_ : Union[str, Any] = class_info_file A_ : List[Any] = prepare_metadata(snake_case , snake_case ) A_ : Tuple = num_text A_ : str = repo_path # for the post_process_functions A_ : Any = 2 A_ : int = 10 A_ : Optional[int] = 10 A_ : Tuple = 3 A_ : Tuple = 4 A_ : str = num_labels A_ : int = do_reduce_labels A_ : List[Any] = ignore_index def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ): '''simple docstring''' if not batched: A_ : List[str] = image_inputs[0] if isinstance(snake_case , Image.Image ): A_ , A_ : Dict = image.size else: A_ , A_ : Tuple = image.shape[1], image.shape[2] if w < h: A_ : str = int(self.size["shortest_edge"] * h / w ) A_ : Any = self.size["shortest_edge"] elif w > h: A_ : Optional[int] = self.size["shortest_edge"] A_ : List[str] = int(self.size["shortest_edge"] * w / h ) else: A_ : List[str] = self.size["shortest_edge"] A_ : Optional[Any] = 
self.size["shortest_edge"] else: A_ : Tuple = [] for image in image_inputs: A_ , A_ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : Tuple = max(snake_case , key=lambda snake_case : item[0] )[0] A_ : Union[str, Any] = max(snake_case , key=lambda snake_case : item[1] )[1] return expected_height, expected_width def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __UpperCamelCase = image_processing_class def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Union[str, Any] = OneFormerImageProcessorTester(self ) @property def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , "image_mean" ) ) self.assertTrue(hasattr(snake_case , "image_std" ) ) self.assertTrue(hasattr(snake_case , "do_normalize" ) ) self.assertTrue(hasattr(snake_case , "do_resize" ) ) self.assertTrue(hasattr(snake_case , "size" ) ) self.assertTrue(hasattr(snake_case , "ignore_index" ) ) self.assertTrue(hasattr(snake_case , "class_info_file" ) ) self.assertTrue(hasattr(snake_case , "num_text" ) ) self.assertTrue(hasattr(snake_case , "repo_path" ) ) self.assertTrue(hasattr(snake_case , "metadata" ) ) self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , 
np.ndarray ) # Test not batched input A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Optional[Any] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Any = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ): '''simple docstring''' A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # prepare image and target A_ : Tuple = self.image_processing_tester.num_labels A_ : str = None A_ : Tuple = None A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) if with_segmentation_maps: A_ : List[str] = num_labels if is_instance_map: A_ : List[str] = list(range(snake_case ) ) * 2 A_ : int = dict(enumerate(snake_case ) ) A_ : List[str] = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": A_ : int = [Image.fromarray(snake_case ) for annotation in annotations] A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , ) return inputs def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' def common(snake_case :Dict=False , snake_case :Optional[int]=None ): A_ : Tuple = self.comm_get_image_processor_inputs( with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case ) A_ : Optional[Any] = inputs["mask_labels"] A_ : List[Any] = inputs["class_labels"] A_ : Optional[Any] = inputs["pixel_values"] A_ : int = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ): self.assertEqual(mask_label.shape[0] , 
class_label.shape[0] ) # this ensures padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text ) common() common(is_instance_map=snake_case ) common(is_instance_map=snake_case , segmentation_type="pil" ) common(is_instance_map=snake_case , segmentation_type="pil" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = np.zeros((20, 50) ) A_ : List[str] = 1 A_ : int = 1 A_ : Optional[Any] = 1 A_ : Any = binary_mask_to_rle(snake_case ) self.assertEqual(len(snake_case ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs() A_ : int = image_processor.post_process_semantic_segmentation(snake_case ) self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] A_ : List[Any] = image_processor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : str = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Tuple = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
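# --- Illustrative sketch (not a dataset row): calling the binary_mask_to_rle
# helper tested above on a tiny mask. The exact run-length convention is the
# one asserted by the test (alternating run lengths over the flattened mask).
import numpy as np
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle

mask = np.zeros((4, 4))
mask[1, 1:3] = 1  # a single 2-pixel foreground run
print(binary_mask_to_rle(mask))  # alternating background/foreground run lengths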
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __magic_name__ : """simple docstring""" def __init__( self :Optional[Any] , snake_case :Dict , snake_case :List[str]=12 , snake_case :Tuple=7 , snake_case :Union[str, Any]=True , snake_case :Dict=True , snake_case :List[str]=True , snake_case :Optional[int]=99 , snake_case :Tuple=32 , snake_case :Optional[int]=32 , snake_case :List[str]=2 , snake_case :Dict=4 , snake_case :Dict=37 , snake_case :Optional[int]=0.1 , snake_case :int=0.1 , snake_case :Optional[Any]=512 , snake_case :Union[str, Any]=0.02 , snake_case :Tuple=0 , snake_case :Any=None , ): '''simple docstring''' A_ : Tuple = parent A_ : Union[str, Any] = batch_size A_ : List[Any] = seq_length A_ : List[str] = is_training A_ : Dict = use_input_mask A_ : List[str] = use_labels A_ : Union[str, Any] = vocab_size A_ : Union[str, Any] = hidden_size A_ : Optional[Any] = projection_dim A_ : Tuple = num_hidden_layers A_ : List[Any] = num_attention_heads A_ : List[str] = intermediate_size A_ : int = dropout A_ : List[str] = attention_dropout A_ : Optional[Any] = max_position_embeddings A_ : Union[str, Any] = initializer_range A_ : Tuple = scope A_ : Union[str, Any] = bos_token_id def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : List[Any] = None if self.use_input_mask: A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: A_ : Union[str, Any] = input_mask.numpy() A_ , A_ : Dict = input_mask.shape A_ : List[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(snake_case ): A_ : Tuple = 1 A_ : int = 0 A_ : Union[str, Any] = self.get_config() return config, input_ids, tf.convert_to_tensor(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :List[str] , snake_case :int , snake_case :Tuple ): '''simple docstring''' A_ : Tuple = TFBlipTextModel(config=snake_case ) A_ : Optional[Any] = model(snake_case , attention_mask=snake_case , training=snake_case ) A_ : Any = model(snake_case , training=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ , A_ , A_ : int = config_and_inputs A_ : 
List[Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (TFBlipTextModel,) if is_tf_available() else () __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : str = BlipTextModelTester(self ) A_ : Union[str, Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="Blip does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' pass @slow def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = TFBlipTextModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Union[str, Any]=True ): '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''data2vec-vision''' def __init__( self :int , snake_case :Optional[int]=768 , snake_case :Any=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :Any="gelu" , snake_case :Tuple=0.0 , snake_case :int=0.0 , snake_case :Any=0.02 , snake_case :str=1e-12 , snake_case :List[str]=224 , snake_case :Dict=16 , snake_case :int=3 , snake_case :int=False , snake_case :str=False , snake_case :List[Any]=False , snake_case :Optional[Any]=False , snake_case :Tuple=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Any=True , snake_case :Optional[Any]=[3, 5, 7, 11] , snake_case :Dict=[1, 2, 3, 6] , snake_case :int=True , snake_case :List[Any]=0.4 , snake_case :Any=256 , snake_case :Union[str, Any]=1 , snake_case :Union[str, Any]=False , snake_case :Any=255 , **snake_case :int , ): '''simple docstring''' super().__init__(**snake_case ) A_ : Dict = hidden_size A_ : Tuple = num_hidden_layers A_ : List[str] = num_attention_heads A_ : Any = intermediate_size A_ : Optional[Any] = hidden_act A_ : Any = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Optional[Any] = initializer_range A_ : List[str] = layer_norm_eps A_ : str = image_size A_ : Optional[int] = patch_size A_ : int = num_channels A_ : Optional[Any] = use_mask_token A_ : Optional[Any] = use_absolute_position_embeddings A_ : Optional[int] = use_relative_position_bias A_ : Dict = use_shared_relative_position_bias A_ : Any = layer_scale_init_value A_ : Optional[Any] = drop_path_rate A_ : Dict = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Tuple = out_indices A_ : Optional[Any] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : str = use_auxiliary_head A_ : List[Any] = auxiliary_loss_weight A_ : List[str] = auxiliary_channels A_ : Dict = auxiliary_num_convs A_ : List[str] = auxiliary_concat_input A_ : Optional[int] = semantic_loss_ignore_index class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return 1e-4
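# --- Illustrative sketch (not a dataset row): the OnnxConfig subclass above
# declares a single 4-D pixel_values input and a 1e-4 validation tolerance.
# The concrete class names are assumptions based on the transformers layout.
from transformers import Data2VecVisionConfig
from transformers.models.data2vec.configuration_data2vec_vision import Data2VecVisionOnnxConfig

onnx_cfg = Data2VecVisionOnnxConfig(Data2VecVisionConfig())
print(dict(onnx_cfg.inputs))         # {'pixel_values': {0: 'batch', 1: 'num_channels', ...}}
print(onnx_cfg.atol_for_validation)  # 1e-4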
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __magic_name__ ( nn.Module ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = 1 __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = jnp.floataa def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Any = [] A_ : List[Any] = [] for i in range(self.num_layers ): A_ : Optional[Any] = self.in_channels if i == 0 else self.out_channels A_ : int = FlaxResnetBlockaD( in_channels=snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(snake_case ) A_ : Any = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(snake_case ) A_ : str = resnets A_ : Optional[int] = attentions if self.add_downsample: A_ : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self :str , snake_case :Optional[Any] , snake_case :int , snake_case :List[str] , snake_case :str=True ): '''simple docstring''' A_ : int = () for resnet, attn in zip(self.resnets , self.attentions ): A_ : List[str] = resnet(snake_case , snake_case , deterministic=snake_case ) A_ : Union[str, Any] = attn(snake_case , snake_case , deterministic=snake_case ) output_states += (hidden_states,) if self.add_downsample: A_ : Optional[int] = self.downsamplers_a(snake_case ) output_states += (hidden_states,) return hidden_states, output_states class __magic_name__ ( nn.Module ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = True __UpperCamelCase = jnp.floataa def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : str = [] for i in range(self.num_layers ): A_ : Any = self.in_channels if i == 0 else self.out_channels A_ : Any = FlaxResnetBlockaD( in_channels=snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(snake_case ) A_ : Tuple = resnets if self.add_downsample: A_ : List[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self :Union[str, Any] , snake_case :Optional[int] , snake_case :int , snake_case :Optional[Any]=True ): '''simple docstring''' A_ : Optional[int] = () for resnet in self.resnets: A_ : List[Any] = resnet(snake_case , snake_case , deterministic=snake_case ) output_states += (hidden_states,) if self.add_downsample: A_ : Tuple = self.downsamplers_a(snake_case ) output_states += (hidden_states,) return hidden_states, output_states class __magic_name__ ( nn.Module ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = 1 __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = jnp.floataa def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Optional[Any] = [] A_ : List[str] = [] for i in range(self.num_layers ): A_ : Tuple = self.in_channels if (i == 
self.num_layers - 1) else self.out_channels A_ : List[str] = self.prev_output_channel if i == 0 else self.out_channels A_ : int = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(snake_case ) A_ : List[Any] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(snake_case ) A_ : Any = resnets A_ : Optional[int] = attentions if self.add_upsample: A_ : Tuple = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self :Union[str, Any] , snake_case :Dict , snake_case :List[Any] , snake_case :str , snake_case :Union[str, Any] , snake_case :List[str]=True ): '''simple docstring''' for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A_ : Optional[int] = res_hidden_states_tuple[-1] A_ : Union[str, Any] = res_hidden_states_tuple[:-1] A_ : int = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A_ : Union[str, Any] = resnet(snake_case , snake_case , deterministic=snake_case ) A_ : List[Any] = attn(snake_case , snake_case , deterministic=snake_case ) if self.add_upsample: A_ : List[Any] = self.upsamplers_a(snake_case ) return hidden_states class __magic_name__ ( nn.Module ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = True __UpperCamelCase = jnp.floataa def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Optional[Any] = [] for i in range(self.num_layers ): A_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels A_ : Optional[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(snake_case ) A_ : Union[str, Any] = resnets if self.add_upsample: A_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self :Optional[Any] , snake_case :Tuple , snake_case :List[Any] , snake_case :str , snake_case :Tuple=True ): '''simple docstring''' for resnet in self.resnets: # pop res hidden states A_ : int = res_hidden_states_tuple[-1] A_ : Optional[int] = res_hidden_states_tuple[:-1] A_ : Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A_ : Tuple = resnet(snake_case , snake_case , deterministic=snake_case ) if self.add_upsample: A_ : List[Any] = self.upsamplers_a(snake_case ) return hidden_states class __magic_name__ ( nn.Module ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = 1 __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = jnp.floataa def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : List[str] = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A_ : str = [] for _ in range(self.num_layers ): A_ : Optional[Any] = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels 
// self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(snake_case ) A_ : Any = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(snake_case ) A_ : int = resnets A_ : Optional[int] = attentions def __call__( self :List[str] , snake_case :Dict , snake_case :List[Any] , snake_case :Optional[Any] , snake_case :List[Any]=True ): '''simple docstring''' A_ : Tuple = self.resnets[0](snake_case , snake_case ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A_ : List[Any] = attn(snake_case , snake_case , deterministic=snake_case ) A_ : Union[str, Any] = resnet(snake_case , snake_case , deterministic=snake_case ) return hidden_states
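# --- Illustrative sketch (not a dataset row): the skip-connection wiring the
# up blocks above rely on: pop the last residual state and concatenate it on
# the channel axis before the resnet. Shapes are examples (NHWC layout).
import jax.numpy as jnp

hidden_states = jnp.ones((1, 8, 8, 64))
res_hidden_states_tuple = (jnp.ones((1, 8, 8, 32)), jnp.ones((1, 8, 8, 64)))
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(merged.shape)  # (1, 8, 8, 128)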
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = ['''input_features''', '''attention_mask'''] def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ): '''simple docstring''' super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case ) A_ : Union[str, Any] = feature_size A_ : int = sampling_rate A_ : str = padding_value A_ : int = hop_length A_ : List[str] = win_length A_ : Any = frame_signal_scale A_ : str = preemphasis_coeff A_ : List[str] = mel_floor A_ : str = normalize_means A_ : Any = normalize_vars A_ : Optional[Any] = win_function A_ : Dict = return_attention_mask A_ : List[str] = win_length * sampling_rate // 1_000 A_ : List[str] = hop_length * sampling_rate // 1_000 A_ : List[str] = optimal_fft_length(self.sample_size ) A_ : str = (self.n_fft // 2) + 1 def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ): '''simple docstring''' if self.win_function == "hamming_window": A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case ) else: A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function ) A_ : Optional[int] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) A_ : Tuple = spectrogram( one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , ) return msfc_features.T def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ): '''simple docstring''' if self.normalize_means: A_ : int = x[:input_length].mean(axis=0 ) A_ : Any = np.subtract(snake_case , snake_case ) if self.normalize_vars: A_ : List[Any] = x[:input_length].std(axis=0 ) A_ : Optional[int] = np.divide(snake_case , snake_case ) if input_length < x.shape[0]: A_ : Optional[int] = padding_value # make sure array is in float32 A_ : Union[str, Any] = x.astype(np.floataa ) return x def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ): '''simple docstring''' A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )] def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case 
:Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) A_ : Optional[Any] = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): A_ : int = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Tuple = [raw_speech] # extract fbank features A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech] # convert into correct format for padding A_ : Union[str, Any] = BatchFeature({"input_features": features} ) A_ : str = self.pad( snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , ) # make sure list is in array format A_ : Optional[int] = padded_inputs.get("input_features" ) if isinstance(input_features[0] , snake_case ): A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features] A_ : Dict = padded_inputs.get("attention_mask" ) if attention_mask is not None: A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: A_ : Dict = ( np.array(snake_case , dtype=np.intaa ) if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) A_ : Optional[int] = self.normalize( padded_inputs["input_features"] , attention_mask=snake_case ) if return_tensors is not None: A_ : Dict = padded_inputs.convert_to_tensors(snake_case ) return padded_inputs
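# --- Illustrative sketch (not a dataset row): the mel-spectrogram building
# blocks the extractor above composes, applied to one second of noise.
# All parameter values are illustrative (25 ms window / 10 ms stride at 16 kHz).
import numpy as np
from transformers.audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function

sr, win, hop = 16_000, 400, 160
n_fft = optimal_fft_length(win)
window = window_function(window_length=win, name="hamming_window", periodic=False)
mels = mel_filter_bank(num_frequency_bins=n_fft // 2 + 1, num_mel_filters=80,
                       min_frequency=0.0, max_frequency=sr / 2.0, sampling_rate=sr)
wave = np.random.randn(sr).astype(np.float32)
feats = spectrogram(wave, window=window, frame_length=win, hop_length=hop,
                    fft_length=n_fft, mel_filters=mels, log_mel="log")
print(feats.shape)  # (80, num_frames)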
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCAmelCase : Optional[int] = 16 _lowerCAmelCase : List[str] = 32 def __snake_case ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 ) -> Tuple: A_ : int = AutoTokenizer.from_pretrained("bert-base-cased" ) A_ : Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(_lowerCAmelCase : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) A_ : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Dict = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : Dict = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCAmelCase : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : Dict = 16 elif accelerator.mixed_precision != "no": A_ : Dict = 8 else: A_ : int = None return tokenizer.pad( _lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. 
A_ : Tuple = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) A_ : Union[str, Any] = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCAmelCase : Optional[Any] = mocked_dataloaders # noqa: F811 def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict: # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1": A_ : Tuple = 2 # New Code # A_ : List[Any] = int(args.gradient_accumulation_steps ) # Initialize accelerator A_ : Optional[int] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_lowerCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Optional[int] = config["lr"] A_ : Optional[Any] = int(config["num_epochs"] ) A_ : List[str] = int(config["seed"] ) A_ : Tuple = int(config["batch_size"] ) A_ : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(_lowerCAmelCase ) A_ , A_ : Optional[Any] = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : int = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : int = model.to(accelerator.device ) # Instantiate optimizer A_ : List[Any] = AdamW(params=model.parameters() , lr=_lowerCAmelCase ) # Instantiate scheduler A_ : List[Any] = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ , A_ , A_ , A_ , A_ : Any = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Now we train the model for epoch in range(_lowerCAmelCase ): model.train() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(_lowerCAmelCase ): A_ : Union[str, Any] = model(**_lowerCAmelCase ) A_ : Optional[int] = output.loss accelerator.backward(_lowerCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A_ : Optional[Any] = model(**_lowerCAmelCase ) A_ : Union[str, Any] = outputs.logits.argmax(dim=-1 ) A_ , A_ : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_lowerCAmelCase , references=_lowerCAmelCase , ) A_ : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , _lowerCAmelCase ) def __snake_case ( ) -> Tuple: A_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose " "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. " "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=_lowerCAmelCase , default=1 , help="The number of minibatches to be run before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) A_ : int = parser.parse_args() A_ : List[str] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
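# --- Illustrative sketch (not a dataset row): how a script like this is
# typically launched; the flags mirror the argparse definitions above, and the
# script name and process count are examples.
#
#   accelerate launch --num_processes 2 train_mrpc.py \
#       --mixed_precision fp16 --gradient_accumulation_steps 4
#
# Inside `accelerator.accumulate(model)`, backward() only synchronizes and the
# optimizer only really steps once every `gradient_accumulation_steps` batches.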
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' super().__init__() A_ : Tuple = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal." ) A_ : List[Any] = prefix_inner_dim A_ : Union[str, Any] = prefix_hidden_dim A_ : List[str] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = ( nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = GPTaConfig( vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , ) A_ : Optional[Any] = GPTaLMHeadModel(snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ): '''simple docstring''' A_ : Any = self.transformer.transformer.wte(snake_case ) A_ : str = self.encode_prefix(snake_case ) A_ : Union[str, Any] = self.decode_prefix(snake_case ) A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A_ : int = torch.cat((dummy_token, input_ids) , dim=1 ) A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ): '''simple docstring''' return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ): '''simple docstring''' return self.encode_prefix(snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Any = torch.split(snake_case , 1 , dim=0 ) A_ : Optional[int] = [] A_ : Union[str, Any] = [] for feature in features: A_ : Tuple = 
self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature # Only support beam search for now A_ , A_ : Dict = self.generate_beam( input_embeds=snake_case , device=snake_case , eos_token_id=snake_case ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A_ : int = torch.stack(snake_case ) A_ : int = torch.stack(snake_case ) return generated_tokens, generated_seq_lengths @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ): '''simple docstring''' A_ : Optional[Any] = eos_token_id A_ : List[Any] = None A_ : List[Any] = None A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int ) A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool ) if input_embeds is not None: A_ : Any = input_embeds else: A_ : Optional[Any] = self.transformer.transformer.wte(snake_case ) for i in range(snake_case ): A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case ) A_ : str = outputs.logits A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A_ : List[str] = logits.softmax(-1 ).log() if scores is None: A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 ) A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] ) A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A_ : Union[str, Any] = next_tokens else: A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] ) A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 ) else: A_ : List[str] = -float(np.inf ) A_ : List[Any] = 0 A_ : Union[str, Any] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 A_ : Optional[Any] = scores_sum / seq_lengths[:, None] A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 ) A_ : str = next_tokens // scores_sum.shape[1] A_ : Union[str, Any] = seq_lengths[next_tokens_source] A_ : Optional[int] = next_tokens % scores_sum.shape[1] A_ : Tuple = next_tokens.unsqueeze(1 ) A_ : Tuple = tokens[next_tokens_source] A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 ) A_ : Dict = generated[next_tokens_source] A_ : Union[str, Any] = scores_sum_average * seq_lengths A_ : Optional[int] = is_stopped[next_tokens_source] A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 ) A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze() if is_stopped.all(): break A_ : int = scores / seq_lengths A_ : str = scores.argsort(descending=snake_case ) # tokens tensors are already padded to max_seq_length A_ : Dict = [tokens[i] for i in order] A_ : int = torch.stack(snake_case , dim=0 ) A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
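# --- Illustrative sketch (not a dataset row): the beam bookkeeping above ranks
# candidates by length-normalized cumulative log-probability, then maps the
# flat top-k indices back to (beam, token) pairs. Numbers are examples.
import torch

scores_sum = torch.tensor([[-2.0, -9.0], [-3.0, -4.0]])  # (beams, vocab) cumulative log-probs
seq_lengths = torch.tensor([2.0, 4.0])
avg = scores_sum / seq_lengths[:, None]
values, flat_idx = avg.view(-1).topk(2)
beam_idx = flat_idx // scores_sum.shape[1]   # which beam each candidate extends
token_idx = flat_idx % scores_sum.shape[1]   # which token it appends
print(beam_idx.tolist(), token_idx.tolist())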
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :List[Any]=0 ): '''simple docstring''' A_ : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case ) ) A_ : str = np.random.RandomState(snake_case ) A_ : Any = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=snake_case ) A_ : str = self.get_dummy_inputs() A_ : int = pipe(**snake_case ).images A_ : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A_ : Optional[Any] = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A_ : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case ) pipe.set_progress_bar_config(disable=snake_case ) A_ : str = self.get_dummy_inputs() A_ : Tuple = pipe(**snake_case ).images A_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Optional[int] = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A_ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case ) # warmup pass to apply optimizations A_ : Dict = pipe(**self.get_dummy_inputs() ) A_ : Any = self.get_dummy_inputs() A_ : List[str] = pipe(**snake_case ).images A_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : List[str] = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A_ : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case ) A_ : Tuple = self.get_dummy_inputs() A_ : str = pipe(**snake_case ).images A_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == 
(1, 128, 128, 3) A_ : int = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A_ : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case ) A_ : str = self.get_dummy_inputs() A_ : Optional[Any] = pipe(**snake_case ).images A_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Union[str, Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A_ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case ) A_ : List[Any] = self.get_dummy_inputs() A_ : Union[str, Any] = pipe(**snake_case ).images A_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Any = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __magic_name__ ( unittest.TestCase ): """simple docstring""" @property def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : int = ort.SessionOptions() A_ : int = False return options def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A_ : Union[str, Any] = init_image.resize((768, 512) ) # using the PNDM scheduler by default A_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=snake_case , feature_extractor=snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case ) A_ : Optional[int] = "A fantasy landscape, trending on artstation" A_ : Any = np.random.RandomState(0 ) A_ : Any = pipe( prompt=snake_case , image=snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case , output_type="np" , ) A_ : Tuple = output.images A_ : Optional[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A_ : List[str] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A_ : Optional[int] = init_image.resize((768, 512) ) A_ : Tuple = LMSDiscreteScheduler.from_pretrained( 
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) A_ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case ) A_ : Any = "A fantasy landscape, trending on artstation" A_ : Tuple = np.random.RandomState(0 ) A_ : Union[str, Any] = pipe( prompt=snake_case , image=snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case , output_type="np" , ) A_ : Dict = output.images A_ : Union[str, Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A_ : Tuple = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor _lowerCAmelCase : Tuple = logging.get_logger(__name__) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :Union[str, Any] , *snake_case :Tuple , **snake_case :Any ): '''simple docstring''' warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , snake_case , ) super().__init__(*snake_case , **snake_case )
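# Migration sketch: the deprecated class above is a thin alias, so callers can switch
# directly to the image processor (checkpoint id below is illustrative):
#
#   from transformers import YolosImageProcessor
#   image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")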
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : Dict = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[Any] = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys _lowerCAmelCase : 
str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
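# Behavior sketch: with the _LazyModule above, importing the package itself is cheap;
# each backend-specific symbol is resolved on first attribute access, e.g.
#
#   from transformers.models.roberta import RobertaModel  # loads modeling_roberta lazily,
#   # and only raises an optional-dependency error at that point if torch is missing.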
from __future__ import annotations


def __snake_case(nums: list[float]) -> bool:
    """
    Check whether the given side lengths can form a polygon in the Euclidean
    plane: the longest side must be strictly shorter than the sum of the rest.

    >>> __snake_case([6, 10, 5])
    True
    >>> __snake_case([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets _lowerCAmelCase : List[Any] = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' _lowerCAmelCase : Union[str, Any] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' _lowerCAmelCase : Optional[Any] = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :List[Any] , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' A_ : List[str] = len(references[0] ) if any(len(snake_case ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A_ : int = [[refs[i] for refs in references] for i in range(snake_case )] A_ : Optional[Any] = TER( normalized=snake_case , no_punct=snake_case , asian_support=snake_case , case_sensitive=snake_case , ) A_ : List[Any] = sb_ter.corpus_score(snake_case , snake_case ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ): '''simple docstring''' super().__init__() self.register_modules( vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A_ : int = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' self.enable_attention_slicing(snake_case ) @torch.no_grad() def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ): '''simple docstring''' if isinstance(snake_case , snake_case ): A_ : Dict = 1 elif isinstance(snake_case , snake_case ): A_ : Optional[Any] = len(snake_case ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(snake_case )}." 
) # get prompt text embeddings A_ : int = self.tokenizer( snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A_ : Dict = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A_ , A_ , A_ : int = text_embeddings.shape A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 ) A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A_ : Dict = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A_ : List[str] if negative_prompt is None: A_ : List[str] = [""] elif type(snake_case ) is not type(snake_case ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !=" f" {type(snake_case )}." ) elif isinstance(snake_case , snake_case ): A_ : Optional[Any] = [negative_prompt] elif batch_size != len(snake_case ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: A_ : Any = negative_prompt A_ : Optional[int] = text_input_ids.shape[-1] A_ : Dict = self.tokenizer( snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , ) A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A_ : Tuple = uncond_embeddings.shape[1] A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 ) A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) A_ : List[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A_ : Tuple = torch.randn( snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device ) A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to( self.device ) else: A_ : int = torch.randn( snake_case , generator=snake_case , device=self.device , dtype=snake_case ) A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case ) else: if latents_reference.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A_ : Tuple = latents_reference.to(self.device ) A_ : Any = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2 A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2 A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy A_ : Optional[Any] = 0 if dx < 0 else dx A_ : Optional[Any] = 0 if dy < 0 else dy A_ : List[str] = max(-dx , 0 ) A_ : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A_ : str = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ : List[str] = {} if accepts_eta: A_ : Union[str, Any] = eta for i, t in enumerate(self.progress_bar(snake_case ) ): # expand the latents if we are doing classifier free guidance A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case ) # predict the noise residual A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample # perform guidance if do_classifier_free_guidance: A_ , A_ : Dict = noise_pred.chunk(2 ) A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case , snake_case , snake_case ) A_ : List[str] = 1 / 0.18215 * latents A_ : Tuple = self.vae.decode(snake_case ).sample A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to( self.device ) A_ , A_ : List[str] = self.safety_checker( images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: A_ : List[str] = None if output_type == "pil": A_ : Optional[int] = self.numpy_to_pil(snake_case ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
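# Usage sketch: this pipeline mirrors StableDiffusionPipeline but additionally accepts a
# `latents_reference` tensor so images generated with the same seed at different sizes
# stay visually similar (via the latent center crop/pad logic above). The loading path
# below is an assumption about how such a community pipeline is typically wired up, not
# part of this file:
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#   )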
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''', } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''lxmert''' __UpperCamelCase = {} def __init__( self :Union[str, Any] , snake_case :int=30_522 , snake_case :Any=768 , snake_case :Any=12 , snake_case :Dict=9_500 , snake_case :Tuple=1_600 , snake_case :str=400 , snake_case :str=3_072 , snake_case :Dict="gelu" , snake_case :Optional[Any]=0.1 , snake_case :Optional[int]=0.1 , snake_case :int=512 , snake_case :Any=2 , snake_case :Optional[int]=0.02 , snake_case :Optional[Any]=1e-12 , snake_case :Optional[int]=9 , snake_case :Optional[int]=5 , snake_case :int=5 , snake_case :List[Any]=2_048 , snake_case :List[Any]=4 , snake_case :Any=6.67 , snake_case :List[Any]=True , snake_case :Optional[int]=True , snake_case :Any=True , snake_case :Dict=True , snake_case :List[str]=True , snake_case :Any=True , snake_case :Optional[int]=True , **snake_case :Union[str, Any] , ): '''simple docstring''' A_ : Optional[int] = vocab_size A_ : Union[str, Any] = hidden_size A_ : Union[str, Any] = num_attention_heads A_ : Dict = hidden_act A_ : List[Any] = intermediate_size A_ : int = hidden_dropout_prob A_ : int = attention_probs_dropout_prob A_ : List[Any] = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Tuple = initializer_range A_ : Dict = layer_norm_eps A_ : int = num_qa_labels A_ : Tuple = num_object_labels A_ : str = num_attr_labels A_ : Any = l_layers A_ : int = x_layers A_ : Any = r_layers A_ : str = visual_feat_dim A_ : Optional[int] = visual_pos_dim A_ : Union[str, Any] = visual_loss_normalizer A_ : int = task_matched A_ : Tuple = task_mask_lm A_ : Tuple = task_obj_predict A_ : str = task_qa A_ : Any = visual_obj_loss A_ : str = visual_attr_loss A_ : List[Any] = visual_feat_loss A_ : List[str] = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} super().__init__(**snake_case )
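# Usage sketch: every argument above is a plain keyword override of the
# lxmert-base-uncased defaults, e.g.
#
#   config = __magic_name__(num_qa_labels=9_500, l_layers=9, x_layers=5, r_layers=5)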
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict: A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase ) A_ : List[str] = nn.functional.normalize(_lowerCAmelCase ) return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() ) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = CLIPConfig __UpperCamelCase = ['''CLIPEncoderLayer'''] def __init__( self :int , snake_case :CLIPConfig ): '''simple docstring''' super().__init__(snake_case ) A_ : int = CLIPVisionModel(config.vision_config ) A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case ) A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case ) A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case ) A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case ) A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ): '''simple docstring''' A_ : List[Any] = self.vision_model(snake_case )[1] # pooled_output A_ : List[Any] = self.visual_projection(snake_case ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy() A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy() A_ : Union[str, Any] = [] A_ : Any = image_embeds.shape[0] for i in range(snake_case ): A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A_ : Optional[Any] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A_ : Optional[Any] = special_cos_dist[i][concept_idx] A_ : Tuple = self.special_care_embeds_weights[concept_idx].item() A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} ) A_ : Any = 0.01 for concept_idx in range(len(cos_dist[0] ) ): A_ : Tuple = cos_dist[i][concept_idx] A_ : Tuple = self.concept_embeds_weights[concept_idx].item() A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(snake_case ) result.append(snake_case ) A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ): '''simple docstring''' A_ : List[str] = self.vision_model(snake_case )[1] # pooled_output A_ : int = self.visual_projection(snake_case ) A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds ) A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A_ : Optional[Any] = 0.0 A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment # 
special_scores = special_scores.round(decimals=3) A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 ) A_ : Optional[Any] = special_care * 0.01 A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
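# Interface note (assumed from the return values above): both forward variants yield
# (images, has_nsfw_concepts), where has_nsfw_concepts holds one boolean per image.
# Calling pipelines typically black out flagged images; raising `adjustment` above 0.0
# makes the filter stricter at the cost of more false positives on benign images.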
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. _lowerCAmelCase : Tuple = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. _lowerCAmelCase : Union[str, Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. _lowerCAmelCase : Optional[Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_000)) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str ) -> tuple[str, float]: A_ : Tuple = len([g for position, g in enumerate(_lowerCAmelCase ) if g == main_target[position]] ) return (item, float(_lowerCAmelCase )) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str ) -> tuple[str, str]: A_ : Optional[Any] = random.randint(0 , len(_lowerCAmelCase ) - 1 ) A_ : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:] A_ : List[str] = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : list[str] ) -> str: A_ : Tuple = list(_lowerCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: A_ : str = random.choice(_lowerCAmelCase ) return "".join(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : tuple[str, float] , _lowerCAmelCase : list[tuple[str, float]] , _lowerCAmelCase : list[str] , ) -> list[str]: A_ : int = [] # Generate more children proportionally to the fitness score. A_ : int = int(parent_a[1] * 100 ) + 1 A_ : Any = 10 if child_n >= 10 else child_n for _ in range(_lowerCAmelCase ): A_ : int = population_score[random.randint(0 , _lowerCAmelCase )][0] A_ , A_ : Any = crossover(parent_a[0] , _lowerCAmelCase ) # Append new string to the population list. pop.append(mutate(_lowerCAmelCase , _lowerCAmelCase ) ) pop.append(mutate(_lowerCAmelCase , _lowerCAmelCase ) ) return pop def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : list[str] , _lowerCAmelCase : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: A_ : Union[str, Any] = f"{N_POPULATION} must be bigger than {N_SELECTED}" raise ValueError(_lowerCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. A_ : List[str] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: A_ : int = f"{not_in_genes_list} is not in genes list, evolution cannot converge" raise ValueError(_lowerCAmelCase ) # Generate random starting population. A_ : int = [] for _ in range(_lowerCAmelCase ): population.append("".join([random.choice(_lowerCAmelCase ) for i in range(len(_lowerCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. A_ , A_ : Tuple = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_lowerCAmelCase ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. A_ : Tuple = [evaluate(_lowerCAmelCase , _lowerCAmelCase ) for item in population] # Check if there is a matching evolution. A_ : List[Any] = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] , reverse=_lowerCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f"\nGeneration: {generation}" f"\nTotal Population:{total_population}" f"\nBest score: {population_score[0][1]}" f"\nBest string: {population_score[0][0]}" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. A_ : int = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_lowerCAmelCase ) # Normalize population score to be between 0 and 1. A_ : str = [ (item, score / len(_lowerCAmelCase )) for item, score in population_score ] # This is selection for i in range(_lowerCAmelCase ): population.extend(select(population_score[int(_lowerCAmelCase )] , _lowerCAmelCase , _lowerCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_lowerCAmelCase ) > N_POPULATION: break if __name__ == "__main__": _lowerCAmelCase : Dict = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) _lowerCAmelCase : Union[str, Any] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = basic(target_str, genes_list) print( F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
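# Worked example of the sizing rule in `select` above: a parent with normalized score
# 0.073 yields int(0.073 * 100) + 1 = 8 crossovers (the count is capped at 10), and each
# crossover appends two mutated children, so up to 16 offspring per selected parent.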
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]: A_ : Tuple = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("encoder.deit.cls_token", "encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict: for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" ) A_ : List[Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : Optional[Any] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Optional[Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any: A_ : Dict = dct.pop(_lowerCAmelCase ) A_ : List[Any] = val def __snake_case ( _lowerCAmelCase : List[str] ) -> int: if "handwritten" in checkpoint_url: A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in 
checkpoint_url: A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" ) return im @torch.no_grad() def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]: A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase ) A_ : Tuple = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : Tuple = 768 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Optional[Any] = 1024 A_ : Union[str, Any] = 4096 A_ : Union[str, Any] = 24 A_ : List[Any] = 16 A_ : List[str] = 1024 else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Dict = False A_ : int = "relu" A_ : Optional[int] = 1024 A_ : Any = True A_ : List[Any] = False A_ : Optional[int] = False # load HuggingFace model A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase ) A_ : str = TrOCRForCausalLM(_lowerCAmelCase ) A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) model.eval() # load state_dict of original model, rename some keys A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"] A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Dict = state_dict.pop(_lowerCAmelCase ) if key.startswith("decoder" ) and "output_projection" not in key: A_ : List[str] = val else: A_ : Optional[Any] = val # load state dict model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size ) A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" ) A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase ) A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values # verify logits A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) A_ : Tuple = outputs.logits A_ : Union[str, Any] = torch.Size([1, 1, 50265] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Union[str, Any] = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : str = torch.tensor( [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, 
-3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : List[str] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
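# Invocation sketch (the script filename is hypothetical; the URL is the argparse default above):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten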
def euclidean_distance_sqr(point_a, point_b):
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs; used as the recursion base case
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # inside the strip, each point only needs comparison with its 6 nearest neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
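# Complexity note: the divide-and-conquer above targets O(n log n) overall; the strip
# pass stays linear because, in sorted order, each point needs comparison with at most
# the 6 neighbours that can fit inside a window of width `closest_pair_dis`.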
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = 1 @register_to_config def __init__( self :Union[str, Any] , snake_case :int = 2_000 , snake_case :float = 0.15 , snake_case :float = 0.01 , snake_case :float = 1348.0 , snake_case :float = 1e-5 , snake_case :int = 1 , ): '''simple docstring''' A_ : Dict = sigma_max # setable values A_ : List[Any] = None self.set_sigmas(snake_case , snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :Optional[int] = None ): '''simple docstring''' return sample def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :float = None , snake_case :Union[str, torch.device] = None ): '''simple docstring''' A_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps A_ : Tuple = torch.linspace(1 , snake_case , snake_case , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int , snake_case :float = None , snake_case :float = None , snake_case :float = None ): '''simple docstring''' A_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min A_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max A_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(snake_case , snake_case ) A_ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) A_ : Any = torch.exp(torch.linspace(math.log(snake_case ) , math.log(snake_case ) , snake_case ) ) A_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Dict ): '''simple docstring''' return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :int , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) A_ : int = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) A_ : Optional[Any] = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda A_ : Dict = timesteps.to(self.discrete_sigmas.device ) A_ : Optional[int] = self.discrete_sigmas[timesteps].to(sample.device ) A_ : int = self.get_adjacent_sigma(snake_case , snake_case ).to(sample.device ) A_ : Union[str, Any] = torch.zeros_like(snake_case ) A_ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE 
models to ancestral sampling methods A_ : Optional[int] = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): A_ : Tuple = diffusion.unsqueeze(-1 ) A_ : Optional[Any] = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of A_ : List[Any] = randn_tensor( sample.shape , layout=sample.layout , generator=snake_case , device=sample.device , dtype=sample.dtype ) A_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? A_ : Any = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=snake_case , prev_sample_mean=snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction A_ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case ).to(sample.device ) # compute step size from the model_output, the noise, and the snr A_ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() A_ : List[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() A_ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 A_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term A_ : int = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): A_ : str = step_size.unsqueeze(-1 ) A_ : Optional[Any] = sample + step_size * model_output A_ : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , ): '''simple docstring''' A_ : Union[str, Any] = timesteps.to(original_samples.device ) A_ : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps] A_ : List[Any] = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(snake_case ) * sigmas[:, None, None, None] ) A_ : Optional[int] = noise + original_samples return noisy_samples def __len__( self :Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
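# Predictor-corrector sketch (upstream names the two step variants above `step_pred`
# and `step_correct`; `model` below is a placeholder score network and
# `n_correct_steps` a placeholder count):
#
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       for _ in range(n_correct_steps):
#           sample = scheduler.step_correct(model(sample, t), sample).prev_sample
#       sample = scheduler.step_pred(model(sample, t), t, sample).prev_sample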
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_50, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''pytorch''', '''script''': '''run_ddp.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_00, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf_dist.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_00, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7}, }, ] ) class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=snake_case , ) assert hasattr(self , "env" ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Union[str, Any] ): '''simple docstring''' A_ : Optional[int] = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings A_ : List[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=snake_case , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="py36" , ) def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict ): '''simple docstring''' TrainingJobAnalytics(snake_case ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" ) @parameterized.expand([(2,)] ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :Tuple ): '''simple docstring''' A_ : Tuple = self.create_estimator(snake_case ) # run training estimator.fit() # result dataframe A_ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A_ : int = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) A_ : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A_ : Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= 
self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case )
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in ``x``) starting from guess ``a``."""
    x = a
    while True:
        # One Newton step: x <- x - f(x) / f'(x); `eval` sees the local `x`.
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # `precision` dictates the accuracy of the answer.
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's execute
if __name__ == "__main__":
    # Find a root of a trigonometric function: the value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find a root of a polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e (root of ln(x) - 1 = 0)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential root at x = 0
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' super().__init__() A_ : Tuple = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal." ) A_ : List[Any] = prefix_inner_dim A_ : Union[str, Any] = prefix_hidden_dim A_ : List[str] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = ( nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = GPTaConfig( vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , ) A_ : Optional[Any] = GPTaLMHeadModel(snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ): '''simple docstring''' A_ : Any = self.transformer.transformer.wte(snake_case ) A_ : str = self.encode_prefix(snake_case ) A_ : Union[str, Any] = self.decode_prefix(snake_case ) A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A_ : int = torch.cat((dummy_token, input_ids) , dim=1 ) A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ): '''simple docstring''' return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ): '''simple docstring''' return self.encode_prefix(snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Any = torch.split(snake_case , 1 , dim=0 ) A_ : Optional[int] = [] A_ : Union[str, Any] = [] for feature in features: A_ : Tuple = 
self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature # Only support beam search for now A_ , A_ : Dict = self.generate_beam( input_embeds=snake_case , device=snake_case , eos_token_id=snake_case ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A_ : int = torch.stack(snake_case ) A_ : int = torch.stack(snake_case ) return generated_tokens, generated_seq_lengths @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ): '''simple docstring''' A_ : Optional[Any] = eos_token_id A_ : List[Any] = None A_ : List[Any] = None A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int ) A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool ) if input_embeds is not None: A_ : Any = input_embeds else: A_ : Optional[Any] = self.transformer.transformer.wte(snake_case ) for i in range(snake_case ): A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case ) A_ : str = outputs.logits A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A_ : List[str] = logits.softmax(-1 ).log() if scores is None: A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 ) A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] ) A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A_ : Union[str, Any] = next_tokens else: A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] ) A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 ) else: A_ : List[str] = -float(np.inf ) A_ : List[Any] = 0 A_ : Union[str, Any] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 A_ : Optional[Any] = scores_sum / seq_lengths[:, None] A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 ) A_ : str = next_tokens // scores_sum.shape[1] A_ : Union[str, Any] = seq_lengths[next_tokens_source] A_ : Optional[int] = next_tokens % scores_sum.shape[1] A_ : Tuple = next_tokens.unsqueeze(1 ) A_ : Tuple = tokens[next_tokens_source] A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 ) A_ : Dict = generated[next_tokens_source] A_ : Union[str, Any] = scores_sum_average * seq_lengths A_ : Optional[int] = is_stopped[next_tokens_source] A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 ) A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze() if is_stopped.all(): break A_ : int = scores / seq_lengths A_ : str = scores.argsort(descending=snake_case ) # tokens tensors are already padded to max_seq_length A_ : Dict = [tokens[i] for i in order] A_ : int = torch.stack(snake_case , dim=0 ) A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
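# For orientation, the core of ``generate_beam`` above is the length-normalized
# beam update: cumulative log-probabilities are divided by sequence length
# before the top-k selection, so longer hypotheses are not unfairly penalized.
# A minimal standalone sketch of that single step, with assumed shapes and
# none of the class machinery:
import torch

beam_size, vocab_size = 5, 50_257
scores = torch.randn(beam_size)  # cumulative log-prob per live beam
seq_lengths = torch.tensor([4.0, 5.0, 5.0, 6.0, 4.0])
logits = torch.randn(beam_size, vocab_size).log_softmax(-1)  # next-token log-probs

scores_sum = scores[:, None] + logits  # score of every (beam, token) extension
scores_avg = scores_sum / (seq_lengths[:, None] + 1)  # normalize by new length
top_scores, flat_idx = scores_avg.view(-1).topk(beam_size)
beam_idx = flat_idx // vocab_size  # which beam each winner extends
token_idx = flat_idx % vocab_size  # which token it appends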
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets _lowerCAmelCase : List[Any] = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' _lowerCAmelCase : Union[str, Any] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' _lowerCAmelCase : Optional[Any] = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :List[Any] , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' A_ : List[str] = len(references[0] ) if any(len(snake_case ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A_ : int = [[refs[i] for refs in references] for i in range(snake_case )] A_ : Optional[Any] = TER( normalized=snake_case , no_punct=snake_case , asian_support=snake_case , case_sensitive=snake_case , ) A_ : List[Any] = sb_ter.corpus_score(snake_case , snake_case ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
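# What ``_compute`` above does under the hood, sketched with sacrebleu directly
# (requires sacrebleu>=1.4.12, as the version check enforces):
from sacrebleu import TER

predictions = ["does this sentence match??", "what about this sentence?"]
references = [
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]
# sacrebleu expects one list per reference *stream*, so transpose the
# per-prediction reference lists first:
transformed_refs = [[refs[i] for refs in references] for i in range(len(references[0]))]
sb_ter = TER(normalized=False, no_punct=False, asian_support=False, case_sensitive=True)
output = sb_ter.corpus_score(predictions, transformed_refs)
print(output.score, output.num_edits, output.ref_length)  # 62.5 5 8.0 per the docstring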
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in the resulting breadth-first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first-in, first-out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
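# Design note (illustrative, not part of the module above): ``queue.pop(0)`` on
# a Python list shifts every remaining element and is O(n) per dequeue; the
# same parent map can be built with ``collections.deque`` for O(1) pops.
from __future__ import annotations

from collections import deque


def bfs_parents(graph: dict[str, list[str]], source: str) -> dict[str, str | None]:
    parent: dict[str, str | None] = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        for neighbor in graph[vertex]:
            if neighbor not in parent:
                parent[neighbor] = vertex
                queue.append(neighbor)
    return parent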
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: closest (squared) distance among the first points_counts points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest (squared) distance within the strip around the dividing line."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
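# Sanity check (illustrative): the divide-and-conquer answer should match an
# O(n^2) brute force over all pairs.
from itertools import combinations
from math import dist  # Python 3.8+


def closest_pair_brute_force(pts: list) -> float:
    return min(dist(p, q) for p, q in combinations(pts, 2))


# closest_pair_brute_force(points) == closest_pair_of_points(points, len(points))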
def heaps(arr: list) -> list:
    """Return all permutations of arr using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
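# Quick property check (illustrative): Heap's algorithm must yield exactly the
# n! permutations, each once, which ``itertools.permutations`` can confirm.
from itertools import permutations

assert sorted(heaps([1, 2, 3, 4])) == sorted(permutations([1, 2, 3, 4]))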
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : """simple docstring""" def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ): '''simple docstring''' A_ : Tuple = parent A_ : int = batch_size A_ : List[str] = image_size A_ : List[Any] = patch_size A_ : Optional[Any] = num_channels A_ : List[Any] = is_training A_ : Tuple = use_labels A_ : Union[str, Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Any = num_attention_heads A_ : List[str] = intermediate_size A_ : Optional[int] = hidden_act A_ : List[str] = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Any = type_sequence_label_size A_ : List[str] = initializer_range A_ : Dict = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Optional[int] = (image_size // patch_size) ** 2 A_ : List[str] = num_patches + 1 def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Tuple = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ): '''simple docstring''' A_ : Optional[Any] = ViTMSNModel(config=snake_case ) model.to(snake_case ) model.eval() A_ : int = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ): '''simple docstring''' A_ : Dict = self.type_sequence_label_size A_ : Tuple = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Union[str, Any] = model(snake_case , 
labels=snake_case ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : Union[str, Any] = 1 A_ : int = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Optional[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : Optional[int] = config_and_inputs A_ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Tuple = ViTMSNModelTester(self ) A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(snake_case ) A_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[str] = [*signature.parameters.keys()] A_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __snake_case ( ) -> Optional[Any]: A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self :str ): '''simple 
docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' torch.manual_seed(2 ) A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case ) A_ : List[str] = self.default_image_processor A_ : int = prepare_img() A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case ) # forward pass with torch.no_grad(): A_ : Optional[int] = model(**snake_case ) # verify the logits A_ : List[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
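# The same checkpoint used in the integration test above, in plain user code
# (a sketch; the image path matches the test fixture):
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(-1))  # predicted ImageNet class index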
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :str ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): A_ : Dict = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : str = "sshleifer/tiny-gpt2" A_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=snake_case , multi_process=snake_case , ) A_ : str = TensorFlowBenchmark(snake_case ) A_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : Dict = "sgugger/tiny-distilbert-classification" A_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , only_pretrain_model=snake_case , ) A_ : List[str] = TensorFlowBenchmark(snake_case ) A_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Union[str, Any] = "sshleifer/tiny-gpt2" A_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , ) A_ : Any = TensorFlowBenchmark(snake_case ) A_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Dict = "sshleifer/tiny-gpt2" A_ : Optional[Any] = AutoConfig.from_pretrained(snake_case ) A_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=snake_case , multi_process=snake_case , ) A_ : List[str] = TensorFlowBenchmark(snake_case , [config] ) A_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : Optional[Any] = "sshleifer/tiny-gpt2" A_ : Any = AutoConfig.from_pretrained(snake_case ) A_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , ) A_ : List[Any] = TensorFlowBenchmark(snake_case , [config] ) A_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Optional[int] = "sshleifer/tiny-gpt2" A_ : Optional[Any] = TensorFlowBenchmarkArguments( 
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , ) A_ : Optional[int] = TensorFlowBenchmark(snake_case ) A_ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : int = "sshleifer/tiny-gpt2" A_ : Union[str, Any] = AutoConfig.from_pretrained(snake_case ) A_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , ) A_ : Any = TensorFlowBenchmark(snake_case , [config] ) A_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : List[Any] = "patrickvonplaten/t5-tiny-random" A_ : Tuple = AutoConfig.from_pretrained(snake_case ) A_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , ) A_ : Union[str, Any] = TensorFlowBenchmark(snake_case , configs=[config] ) A_ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : int = "sshleifer/tiny-gpt2" A_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , use_xla=snake_case , multi_process=snake_case , ) A_ : List[str] = TensorFlowBenchmark(snake_case ) A_ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Union[str, Any] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: A_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=snake_case , save_to_csv=snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(snake_case , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(snake_case , "inf_mem.csv" ) , env_info_csv_file=os.path.join(snake_case , "env.csv" ) , multi_process=snake_case , ) A_ : Any = TensorFlowBenchmark(snake_case ) benchmark.run() self.assertTrue(Path(os.path.join(snake_case , "inf_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case , "env.csv" ) ).exists() ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : Dict = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(snake_case :Dict ): self.assertTrue(hasattr(snake_case , "sequential" ) ) self.assertTrue(hasattr(snake_case , "cumulative" ) ) self.assertTrue(hasattr(snake_case , "current" ) ) self.assertTrue(hasattr(snake_case , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: A_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(snake_case , "log.txt" ) , 
log_print=snake_case , trace_memory_line_by_line=snake_case , eager_mode=snake_case , multi_process=snake_case , ) A_ : int = TensorFlowBenchmark(snake_case ) A_ : Optional[Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(snake_case , "log.txt" ) ).exists() )
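# The minimal pattern these tests repeat, as a standalone sketch (flags mirror
# the arguments exercised above):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)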
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = (DDPMScheduler,) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ): '''simple docstring''' A_ : Dict = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**snake_case ) return config def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case , beta_end=snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.check_over_configs(thresholding=snake_case ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Tuple = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : List[str] = scheduler_class(**snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : int = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : int = scheduler_class(**snake_case ) A_ : Tuple = len(snake_case ) A_ : List[str] = self.dummy_model() A_ : Optional[Any] = self.dummy_sample_deter A_ : List[str] = torch.manual_seed(0 ) for t in reversed(range(snake_case ) ): # 1. predict noise residual A_ : Tuple = model(snake_case , snake_case ) # 2. 
predict previous mean of sample x_t-1 A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : Optional[int] = pred_prev_sample A_ : Tuple = torch.sum(torch.abs(snake_case ) ) A_ : str = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Optional[int] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config(prediction_type="v_prediction" ) A_ : List[str] = scheduler_class(**snake_case ) A_ : int = len(snake_case ) A_ : Dict = self.dummy_model() A_ : str = self.dummy_sample_deter A_ : Any = torch.manual_seed(0 ) for t in reversed(range(snake_case ) ): # 1. predict noise residual A_ : Optional[int] = model(snake_case , snake_case ) # 2. predict previous mean of sample x_t-1 A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : List[str] = pred_prev_sample A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) ) A_ : List[str] = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config() A_ : Dict = scheduler_class(**snake_case ) A_ : Optional[int] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case ) A_ : Optional[int] = scheduler.timesteps for i, timestep in enumerate(snake_case ): if i == len(snake_case ) - 1: A_ : str = -1 else: A_ : List[str] = timesteps[i + 1] A_ : Optional[int] = scheduler.previous_timestep(snake_case ) A_ : List[str] = prev_t.item() self.assertEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Optional[Any] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config() A_ : Tuple = scheduler_class(**snake_case ) A_ : List[str] = [100, 87, 50, 51, 0] with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Any = self.scheduler_classes[0] A_ : Union[str, Any] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**snake_case ) A_ : Union[str, Any] = [100, 87, 50, 1, 0] A_ : Optional[int] = len(snake_case ) with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Union[str, Any] = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**snake_case ) A_ : Optional[int] = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=snake_case )
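# The full-loop pattern the two denoising tests above exercise, as a hedged
# standalone sketch: a random tensor stands in for a trained UNet's output.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
scheduler.set_timesteps(50)  # subsample 1000 training steps to 50 inference steps

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8)  # start from pure noise
for t in scheduler.timesteps:
    noise_pred = torch.randn(1, 3, 8, 8)  # a real pipeline would call unet(sample, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample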
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
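# Illustrative check: for a Hermitian matrix the Rayleigh quotient is bounded
# by the extreme eigenvalues (min-max theorem), which numpy can confirm.
import numpy as np

a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
eigs = np.linalg.eigvalsh(a)  # real eigenvalues of a Hermitian matrix
rq = rayleigh_quotient(a, v).real.item()
assert eigs.min() - 1e-12 <= rq <= eigs.max() + 1e-12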
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : int = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]: for attribute in key.split("." ): A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: A_ : Tuple = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Any = value elif weight_type == "bias": A_ : str = value else: A_ : Any = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]: A_ : Optional[Any] = [] A_ : Any = fairseq_model.state_dict() A_ : Union[str, Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ : str = None for name, value in fairseq_dict.items(): A_ : Tuple = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) A_ : Optional[Any] = True elif name.split("." )[0] == "proj": A_ : Dict = fairseq_model.proj A_ : List[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ : int = True if "*" in mapped_key: A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." 
)[-2] A_ : int = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: A_ : List[Any] = "weight_g" elif "weight_v" in name: A_ : List[Any] = "weight_v" elif "bias" in name: A_ : Dict = "bias" elif "weight" in name: A_ : List[Any] = "weight" else: A_ : Dict = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: A_ : Any = full_name.split("conv_layers." )[-1] A_ : Optional[int] = name.split("." ) A_ : Optional[Any] = int(items[0] ) A_ : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A_ : List[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A_ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) A_ : List[Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) A_ : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str: A_ , A_ : List[str] = emb.weight.shape A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) A_ : List[Any] = emb.weight.data return lin_layer def __snake_case ( _lowerCAmelCase : str ) -> Tuple: with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: A_ : int = f.readlines() A_ : Dict = [line.split(" " )[0] for line in lines] A_ : Tuple = len(_lowerCAmelCase ) A_ : Union[str, Any] = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple: A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) A_ : str = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) A_ : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ : Union[str, Any] = model[0].eval() # set weights for wav2vec2 encoder A_ : Tuple = WavaVecaModel(_lowerCAmelCase ) A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase ) A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) A_ : Optional[Any] = False # add projection layer A_ : Optional[Any] = nn.Parameter(projection_layer.weight ) A_ : int = nn.Parameter(projection_layer.bias ) A_ : str = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) A_ : Optional[int] = hf_wavavec.config.to_dict() A_ : int = tokenizer.pad_token_id A_ : List[str] = tokenizer.bos_token_id A_ : List[str] = tokenizer.eos_token_id A_ : List[str] = "speech_to_text_2" A_ : Tuple = "wav2vec2" A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, 
help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __magic_name__ ( unittest.TestCase ): """simple docstring""" def __init__( self :Tuple , snake_case :Any , snake_case :List[Any]=13 , snake_case :int=7 , snake_case :int=True , snake_case :Union[str, Any]=True , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Optional[int]=99 , snake_case :Tuple=32 , snake_case :List[Any]=5 , snake_case :int=4 , snake_case :List[str]=37 , snake_case :Tuple="gelu" , snake_case :List[str]=0.1 , snake_case :List[Any]=0.1 , snake_case :Union[str, Any]=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Union[str, Any]=0.02 , snake_case :List[Any]=4 , ): '''simple docstring''' A_ : Any = parent A_ : Union[str, Any] = batch_size A_ : int = seq_length A_ : Optional[int] = is_training A_ : Optional[Any] = use_attention_mask A_ : Union[str, Any] = use_token_type_ids A_ : str = use_labels A_ : Any = vocab_size A_ : Any = hidden_size A_ : Union[str, Any] = num_hidden_layers A_ : Any = num_attention_heads A_ : int = intermediate_size A_ : Tuple = hidden_act A_ : Union[str, Any] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : Dict = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : Any = initializer_range A_ : int = num_choices def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Tuple = None if self.use_attention_mask: A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) A_ : str = None if self.use_token_type_ids: A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : List[Any] = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Optional[Any] = self.prepare_config_and_inputs() A_ , A_ , A_ , A_ : str = config_and_inputs A_ : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : str = self.prepare_config_and_inputs() A_ , A_ , A_ , A_ : List[Any] = config_and_inputs A_ : Dict = True A_ : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, 
) @require_flax class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = True __UpperCamelCase = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Tuple = FlaxRobertaModelTester(self ) @slow def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' for model_class_name in self.all_model_classes: A_ : List[str] = model_class_name.from_pretrained("roberta-base" , from_pt=snake_case ) A_ : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case )
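# The slow test above in plain user code (a sketch; loads PyTorch weights into Flax):
import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 1), dtype="i4"))  # a single dummy token id
print(outputs.last_hidden_state.shape)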
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : """simple docstring""" def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ): '''simple docstring''' A_ : str = parent A_ : str = batch_size A_ : str = seq_length A_ : Any = is_training A_ : Any = use_input_mask A_ : str = use_token_type_ids A_ : Tuple = use_labels A_ : Optional[Any] = vocab_size A_ : Dict = hidden_size A_ : str = num_hidden_layers A_ : Dict = num_attention_heads A_ : str = intermediate_size A_ : int = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : List[Any] = type_vocab_size A_ : Any = type_sequence_label_size A_ : Dict = initializer_range A_ : Any = num_labels A_ : Optional[int] = num_choices A_ : Optional[Any] = scope A_ : Any = range_bbox def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ : str = bbox[i, j, 3] A_ : Union[str, Any] = bbox[i, j, 1] A_ : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: A_ : Any = bbox[i, j, 2] A_ : Tuple = bbox[i, j, 0] A_ : int = t A_ : int = tf.convert_to_tensor(snake_case ) A_ : Any = None if self.use_input_mask: A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : str = None if self.use_token_type_ids: A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Dict = None A_ : List[Any] = None A_ : List[str] = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : str = ids_tensor([self.batch_size] , self.num_choices ) A_ : int = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ): '''simple docstring''' A_ : Any = TFLayoutLMModel(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) A_ : str = model(snake_case , snake_case , token_type_ids=snake_case ) A_ : List[Any] = model(snake_case , snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ): '''simple docstring''' A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Union[str, Any] = self.num_labels A_ : int = TFLayoutLMForSequenceClassification(config=snake_case ) A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.num_labels A_ : str = TFLayoutLMForTokenClassification(config=snake_case ) A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case ) A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Optional[Any] = { "input_ids": 
input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 10 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Tuple = TFLayoutLMModelTester(self ) A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def __snake_case ( ) -> Optional[Any]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 A_ : Union[str, Any] = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs() # forward pass A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the sequence output on [0, :3, :3] A_ : List[Any] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) ) # test the pooled output on [1, :3] A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs() # forward pass A_ : Dict = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar A_ : List[str] = outputs.loss A_ : Union[str, Any] = (2,) self.assertEqual(loss.shape , snake_case ) # test the shape of the logits A_ : Tuple = outputs.logits A_ : Tuple = (2, 2) self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) # test the shape of the logits A_ : Dict = outputs.logits A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) ) 
self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the shape of the logits A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , snake_case ) self.assertEqual(outputs.end_logits.shape , snake_case )
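# A standalone inference sketch for the models exercised above (a sketch, assuming the
# microsoft/layoutlm-base-uncased checkpoint is reachable; box coordinates must be
# normalized to a 0-1000 scale, matching range_bbox in the tester class).
import tensorflow as tf
from transformers import LayoutLMTokenizer, TFLayoutLMModel

tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

words = ["Hello", "world"]
word_boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # illustrative coordinates

# Repeat each word's box for every wordpiece it is split into, then add the
# conventional boxes for [CLS] and [SEP] (as in the integration inputs above).
token_boxes = [[0, 0, 0, 0]]
for word, box in zip(words, word_boxes):
    token_boxes.extend([box] * len(tokenizer.tokenize(word)))
token_boxes.append([1000, 1000, 1000, 1000])

encoding = tokenizer(" ".join(words), return_tensors="tf")
outputs = model(
    input_ids=encoding["input_ids"],
    bbox=tf.convert_to_tensor([token_boxes]),
    attention_mask=encoding["attention_mask"],
    token_type_ids=encoding["token_type_ids"],
)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)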
def __snake_case(a: int, b: int) -> str:
    """
    Return the bitwise OR of two non-negative integers as a binary string.

    >>> __snake_case(25, 32)
    '0b111001'
    >>> __snake_case(0, 1)
    '0b1'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be non-negative")
    a_binary = bin(a)[2:]  # remove the leading "0b"
    b_binary = bin(b)[2:]
    max_len = max(len(a_binary), len(b_binary))
    # Pad both operands to the same width, then OR them column by column.
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
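# A quick sanity check (illustrative addendum, not part of the file above): the
# string-based OR should agree with Python's built-in bitwise OR for any
# non-negative pair of integers.
for x, y in [(25, 32), (0, 0), (7, 9), (1024, 1)]:
    assert __snake_case(x, y) == bin(x | y), (x, y)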
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>

Assistant: '''


DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}


def __snake_case(prompt_or_repo_id: str, agent_name: str, mode: str = "run") -> str:
    # Fall back to the default prompts repo when nothing is supplied.
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # The argument is treated as a literal prompt rather than a repo ID as soon
    # as it contains any kind of whitespace; repo IDs never do.
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
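# Illustrative usage of the helper above (a hypothetical call, not part of the
# module): anything containing whitespace is returned verbatim as the prompt
# itself, while a bare repo ID would trigger a Hub download for the given mode.
inline = __snake_case("Translate <<task>> into French.", agent_name="my-agent")
assert inline == "Translate <<task>> into French."
# template = __snake_case(None, agent_name="my-agent", mode="chat")  # fetches from the Hub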
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class __magic_name__ ( unittest.TestCase ): """simple docstring""" def __init__( self :Union[str, Any] , snake_case :str , snake_case :Any=13 , snake_case :Tuple=7 , snake_case :str=True , snake_case :int=True , snake_case :Union[str, Any]=True , snake_case :Dict=True , snake_case :Dict=99 , snake_case :Any=32 , snake_case :Optional[int]=5 , snake_case :Tuple=4 , snake_case :Optional[Any]=37 , snake_case :Optional[Any]="gelu" , snake_case :int=0.1 , snake_case :str=0.1 , snake_case :Optional[Any]=512 , snake_case :Optional[Any]=16 , snake_case :Any=2 , snake_case :Optional[Any]=0.02 , snake_case :Optional[Any]=4 , ): '''simple docstring''' A_ : int = parent A_ : Optional[int] = batch_size A_ : Dict = seq_length A_ : List[Any] = is_training A_ : Dict = use_attention_mask A_ : Optional[int] = use_token_type_ids A_ : Any = use_labels A_ : str = vocab_size A_ : Any = hidden_size A_ : Dict = num_hidden_layers A_ : Dict = num_attention_heads A_ : str = intermediate_size A_ : Dict = hidden_act A_ : Optional[Any] = hidden_dropout_prob A_ : Optional[Any] = attention_probs_dropout_prob A_ : Tuple = max_position_embeddings A_ : int = type_vocab_size A_ : str = type_sequence_label_size A_ : Optional[Any] = initializer_range A_ : List[str] = num_choices def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Any = None if self.use_attention_mask: A_ : int = random_attention_mask([self.batch_size, self.seq_length] ) A_ : List[Any] = None if self.use_token_type_ids: A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : List[Any] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : List[Any] = self.prepare_config_and_inputs() A_ , A_ , A_ , A_ : str = config_and_inputs A_ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): 
'''simple docstring''' A_ : str = FlaxAlbertModelTester(self ) @slow def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for model_class_name in self.all_model_classes: A_ : List[Any] = model_class_name.from_pretrained("albert-base-v2" ) A_ : Any = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case ) @require_flax class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Any = FlaxAlbertModel.from_pretrained("albert-base-v2" ) A_ : str = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) A_ : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A_ : Tuple = model(snake_case , attention_mask=snake_case )[0] A_ : str = (1, 11, 768) self.assertEqual(output.shape , snake_case ) A_ : str = np.array( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case , atol=1e-4 ) )
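# A minimal standalone sketch (not part of the test file above; assumes network
# access to the albert-base-v2 checkpoint and that sentencepiece is installed)
# showing the same forward pass the slow integration test verifies.
from transformers import AlbertTokenizer, FlaxAlbertModel

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = FlaxAlbertModel.from_pretrained("albert-base-v2")
inputs = tokenizer("Flax runs ALBERT on CPU, GPU and TPU.", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)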
def __snake_case(arr: list) -> list:
    """Generate all permutations of arr using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n  # encodes the loop counter for each recursion level
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                # Heap's rule: swap with position 0 when i is even,
                # otherwise with position c[i].
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(__snake_case(arr))
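# A quick check (illustrative addendum): for a 3-element list, Heap's algorithm
# must produce exactly 3! = 6 tuples covering the same set that
# itertools.permutations yields.
from itertools import permutations

perms = __snake_case([1, 2, 3])
assert len(perms) == 6
assert set(perms) == set(permutations([1, 2, 3]))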
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : Optional[int] = { '''configuration_blenderbot_small''': [ '''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotSmallConfig''', '''BlenderbotSmallOnnxConfig''', ], '''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = ['''BlenderbotSmallTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = [ '''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotSmallForCausalLM''', '''BlenderbotSmallForConditionalGeneration''', '''BlenderbotSmallModel''', '''BlenderbotSmallPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Union[str, Any] = [ '''TFBlenderbotSmallForConditionalGeneration''', '''TFBlenderbotSmallModel''', '''TFBlenderbotSmallPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = [ '''FlaxBlenderbotSmallForConditionalGeneration''', '''FlaxBlenderbotSmallModel''', '''FlaxBlenderbotSmallPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys _lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
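# Illustrative effect of the _LazyModule wiring above (a sketch, assuming
# transformers is installed with the relevant backends): the package import is
# cheap, and the concrete submodule is only imported when one of the declared
# names is first accessed.
from transformers.models.blenderbot_small import BlenderbotSmallConfig

config = BlenderbotSmallConfig()  # triggers the actual configuration module import
print(config.model_type)  # "blenderbot-small"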
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : List[Any] = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase : Any = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = RobertaTokenizer def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ): '''simple docstring''' super().__init__( snake_case , snake_case 
, tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , ) A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**snake_case ) A_ : Optional[int] = add_prefix_space A_ : Optional[int] = "post_processor" A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case ) if tokenizer_component_instance: A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : List[Any] = tuple(state["sep"] ) if "cls" in state: A_ : Optional[Any] = tuple(state["cls"] ) A_ : Tuple = False if state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : List[Any] = add_prefix_space A_ : Optional[int] = True if state.get("trim_offsets" , snake_case ) != trim_offsets: A_ : List[str] = trim_offsets A_ : Any = True if changes_to_apply: A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) ) A_ : Any = component_class(**snake_case ) setattr(self.backend_tokenizer , snake_case , snake_case ) @property def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Dict ): '''simple docstring''' A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value A_ : Any = value def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ): '''simple docstring''' A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ): '''simple docstring''' A_ : Any = [self.sep_token_id] A_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
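# A short usage sketch (assumes the roberta-base files listed above are
# reachable): add_prefix_space must be True when feeding pre-tokenized input,
# per the assertions in _batch_encode_plus/_encode_plus above.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)
# Note the Ġ (space) markers added by the byte-level BPE.
print(tok.convert_ids_to_tokens(enc["input_ids"]))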
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = StableDiffusionPanoramaPipeline __UpperCamelCase = TEXT_TO_IMAGE_PARAMS __UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) A_ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) A_ : Dict = DDIMScheduler() torch.manual_seed(0 ) A_ : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) A_ : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) A_ : int = CLIPTextModel(snake_case ) A_ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A_ : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Any] , snake_case :int=0 ): '''simple docstring''' A_ : Optional[int] = torch.manual_seed(snake_case ) A_ : int = { "prompt": "a photo of the dolomites", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. 
"height": None, "width": None, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator A_ : List[Any] = self.get_dummy_components() A_ : Any = StableDiffusionPanoramaPipeline(**snake_case ) A_ : Optional[Any] = sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) A_ : str = self.get_dummy_inputs(snake_case ) A_ : Optional[int] = sd_pipe(**snake_case ).images A_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : List[str] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator A_ : Any = self.get_dummy_components() A_ : Optional[Any] = StableDiffusionPanoramaPipeline(**snake_case ) A_ : Dict = sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) A_ : int = self.get_dummy_inputs(snake_case ) A_ : Dict = "french fries" A_ : int = sd_pipe(**snake_case , negative_prompt=snake_case ) A_ : Optional[int] = output.images A_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : Tuple = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator A_ : List[Any] = self.get_dummy_components() A_ : Optional[Any] = StableDiffusionPanoramaPipeline(**snake_case ) A_ : Tuple = sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) A_ : Union[str, Any] = self.get_dummy_inputs(snake_case ) A_ : Tuple = sd_pipe(**snake_case , view_batch_size=2 ) A_ : List[str] = output.images A_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : Union[str, Any] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator A_ : Dict = self.get_dummy_components() A_ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" ) A_ : str = StableDiffusionPanoramaPipeline(**snake_case ) A_ : Optional[Any] = sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) A_ : Tuple = self.get_dummy_inputs(snake_case ) A_ : Union[str, Any] = sd_pipe(**snake_case ).images A_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : List[Any] = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Dict = "cpu" # ensure determinism for the device-dependent 
torch.Generator A_ : Optional[int] = self.get_dummy_components() A_ : str = PNDMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=snake_case ) A_ : Tuple = StableDiffusionPanoramaPipeline(**snake_case ) A_ : List[str] = sd_pipe.to(snake_case ) sd_pipe.set_progress_bar_config(disable=snake_case ) A_ : int = self.get_dummy_inputs(snake_case ) A_ : str = sd_pipe(**snake_case ).images A_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : Any = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Union[str, Any]=0 ): '''simple docstring''' A_ : List[Any] = torch.manual_seed(snake_case ) A_ : int = { "prompt": "a photo of the dolomites", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = "stabilityai/stable-diffusion-2-base" A_ : Optional[Any] = DDIMScheduler.from_pretrained(snake_case , subfolder="scheduler" ) A_ : Any = StableDiffusionPanoramaPipeline.from_pretrained(snake_case , scheduler=snake_case , safety_checker=snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() A_ : List[Any] = self.get_inputs() A_ : Dict = pipe(**snake_case ).images A_ : List[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2_048, 3) A_ : List[str] = np.array( [ 0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Tuple = StableDiffusionPanoramaPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base" , safety_checker=snake_case ) A_ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() A_ : str = self.get_inputs() A_ : Optional[Any] = pipe(**snake_case ).images A_ : List[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2_048, 3) A_ : Dict = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Tuple = 0 def callback_fn(snake_case :int , snake_case :int , snake_case :torch.FloatTensor ) -> None: A_ : Tuple = True nonlocal number_of_steps number_of_steps += 1 if step == 1: A_ : List[str] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) A_ : Union[str, Any] = latents[0, -3:, -3:, -1] A_ : Optional[int] = np.array( [ 0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: A_ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) A_ : Union[str, Any] = latents[0, -3:, -3:, -1] A_ : Tuple = np.array( [ 0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505, ] 
) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 A_ : int = False A_ : Tuple = "stabilityai/stable-diffusion-2-base" A_ : str = DDIMScheduler.from_pretrained(snake_case , subfolder="scheduler" ) A_ : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(snake_case , scheduler=snake_case , safety_checker=snake_case ) A_ : str = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() A_ : Dict = self.get_inputs() pipe(**snake_case , callback=snake_case , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A_ : Dict = "stabilityai/stable-diffusion-2-base" A_ : Tuple = DDIMScheduler.from_pretrained(snake_case , subfolder="scheduler" ) A_ : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(snake_case , scheduler=snake_case , safety_checker=snake_case ) A_ : Union[str, Any] = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A_ : List[Any] = self.get_inputs() A_ : Dict = pipe(**snake_case ) A_ : Union[str, Any] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
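# A minimal end-to-end sketch mirroring the slow tests above (a sketch, assuming
# a CUDA GPU and the stabilityai/stable-diffusion-2-base weights; runtime and
# memory requirements are substantial).
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

image = pipe("a photo of the dolomites").images[0]
image.save("dolomites_panorama.png")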
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCAmelCase : int = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' _lowerCAmelCase : Tuple = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' _lowerCAmelCase : int = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case ) }
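# The metric above is a thin wrapper over NLTK; an equivalent direct call
# (a sketch, assuming nltk is installed) for a single hypothesis/reference pair:
from nltk.translate.gleu_score import corpus_gleu

hyp = "the cat sat on the mat".split()
ref = "the cat is on the mat".split()
score = corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4)
print(round(score, 2))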
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __magic_name__ ( unittest.TestCase ): """simple docstring""" def __init__( self :List[str] , snake_case :Any , snake_case :Optional[int]=7 , snake_case :str=3 , snake_case :Optional[int]=18 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :Optional[int]=True , snake_case :Optional[Any]=None , snake_case :Tuple=True , snake_case :Tuple=None , snake_case :str=True , snake_case :List[Any]=[0.48145466, 0.4578275, 0.40821073] , snake_case :str=[0.26862954, 0.26130258, 0.27577711] , snake_case :List[Any]=True , ): '''simple docstring''' A_ : Tuple = size if size is not None else {"height": 224, "width": 224} A_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 18, "width": 18} A_ : Tuple = parent A_ : str = batch_size A_ : Any = num_channels A_ : Tuple = image_size A_ : Tuple = min_resolution A_ : List[str] = max_resolution A_ : Dict = do_resize A_ : Any = size A_ : Optional[int] = do_center_crop A_ : str = crop_size A_ : List[str] = do_normalize A_ : Optional[int] = image_mean A_ : List[str] = image_std A_ : Union[str, Any] = do_convert_rgb def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def SCREAMING_SNAKE_CASE ( self :str , snake_case :Optional[Any]=False , snake_case :Optional[Any]=False , snake_case :Optional[Any]=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: A_ : Any = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: A_ : Tuple = [] for i in range(self.batch_size ): A_ , A_ : Union[str, Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension A_ : Any = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] if torchify: A_ : Union[str, Any] = [torch.from_numpy(snake_case ) for x in image_inputs] return image_inputs @require_torch @require_vision class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Any = ChineseCLIPImageProcessingTester(self , do_center_crop=snake_case ) @property def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , "do_resize" ) ) self.assertTrue(hasattr(snake_case , 
"size" ) ) self.assertTrue(hasattr(snake_case , "do_center_crop" ) ) self.assertTrue(hasattr(snake_case , "center_crop" ) ) self.assertTrue(hasattr(snake_case , "do_normalize" ) ) self.assertTrue(hasattr(snake_case , "image_mean" ) ) self.assertTrue(hasattr(snake_case , "image_std" ) ) self.assertTrue(hasattr(snake_case , "do_convert_rgb" ) ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 224, "width": 224} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) A_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Any = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input A_ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ : str = image_processing(snake_case , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , np.ndarray ) # Test not batched input A_ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ : int = image_processing(snake_case , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : str = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input A_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ : Any = 
image_processing(snake_case , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Tuple = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=snake_case ) A_ : Dict = 3 @property def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , "do_resize" ) ) self.assertTrue(hasattr(snake_case , "size" ) ) self.assertTrue(hasattr(snake_case , "do_center_crop" ) ) self.assertTrue(hasattr(snake_case , "center_crop" ) ) self.assertTrue(hasattr(snake_case , "do_normalize" ) ) self.assertTrue(hasattr(snake_case , "image_mean" ) ) self.assertTrue(hasattr(snake_case , "image_std" ) ) self.assertTrue(hasattr(snake_case , "do_convert_rgb" ) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : int = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input A_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ : Optional[int] = image_processing(snake_case , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
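# A standalone usage sketch (assumes the OFA-Sys/chinese-clip-vit-base-patch16
# checkpoint and Pillow are available): preprocess one RGB image into a
# pixel_values tensor of the processor's configured crop size.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # e.g. torch.Size([1, 3, 224, 224])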
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]: A_ : Tuple = tmp_path / "cache" A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str: A_ : List[Any] = tmp_path / "cache" A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : int = features.copy() if features else default_expected_features A_ : str = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]: A_ : Dict = tmp_path / "cache" A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]: if issubclass(_lowerCAmelCase , _lowerCAmelCase ): A_ : int = parquet_path elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): A_ : Optional[int] = [parquet_path] A_ : Optional[int] = tmp_path / "cache" A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) 
for split in splits: A_ : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]: A_ : Optional[Any] = tmp_path / "cache" A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ : Union[str, Any] = ParquetDatasetReader( {"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple: A_ : Optional[Any] = tmp_path / "cache" A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : List[str] = features.copy() if features else default_expected_features A_ : Tuple = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]: if split: A_ : Any = {split: parquet_path} else: A_ : Optional[Any] = "train" A_ : str = {"train": parquet_path, "test": parquet_path} A_ : Any = tmp_path / "cache" A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict: A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) A_ : Dict = pf.read() assert dataset.data.table == output_table def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]: A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" ) A_ : int = {"image": [image_path]} A_ : Optional[Any] = Features({"image": Image()} ) A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase ) A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ 
(Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any: assert get_writer_batch_size(_lowerCAmelCase ) == expected
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # str() each element so join() works on a list of ints
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
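# Pigeonhole sort runs in O(n + k) time with O(k) extra space for
# k = max - min + 1, so it only pays off when the value range is small
# relative to n. A quick property check of pigeonhole_sort above against
# Python's built-in sorted():
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(1, 200))]
    expected = sorted(data)
    pigeonhole_sort(data)
    assert data == expected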
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int: with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f: A_ : Optional[int] = json.load(_lowerCAmelCase ) A_ : Union[str, Any] = {} A_ : Tuple = [] A_ : Optional[Any] = [] for key, info in class_info.items(): A_ : Tuple = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(_lowerCAmelCase ) ) A_ : Optional[Any] = thing_ids A_ : int = class_names return metadata class __magic_name__ ( unittest.TestCase ): """simple docstring""" def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ): '''simple docstring''' A_ : Tuple = parent A_ : List[str] = batch_size A_ : Optional[int] = num_channels A_ : Tuple = min_resolution A_ : List[Any] = max_resolution A_ : Union[str, Any] = do_resize A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size A_ : Tuple = do_normalize A_ : List[str] = image_mean A_ : List[Any] = image_std A_ : Union[str, Any] = class_info_file A_ : List[Any] = prepare_metadata(snake_case , snake_case ) A_ : Tuple = num_text A_ : str = repo_path # for the post_process_functions A_ : Any = 2 A_ : int = 10 A_ : Optional[int] = 10 A_ : Tuple = 3 A_ : Tuple = 4 A_ : str = num_labels A_ : int = do_reduce_labels A_ : List[Any] = ignore_index def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ): '''simple docstring''' if not batched: A_ : List[str] = image_inputs[0] if isinstance(snake_case , Image.Image ): A_ , A_ : Dict = image.size else: A_ , A_ : Tuple = image.shape[1], image.shape[2] if w < h: A_ : str = int(self.size["shortest_edge"] * h / w ) A_ : Any = self.size["shortest_edge"] elif w > h: A_ : Optional[int] = self.size["shortest_edge"] A_ : List[str] = int(self.size["shortest_edge"] * w / h ) else: A_ : List[str] = self.size["shortest_edge"] A_ : Optional[Any] = 
self.size["shortest_edge"] else: A_ : Tuple = [] for image in image_inputs: A_ , A_ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : Tuple = max(snake_case , key=lambda snake_case : item[0] )[0] A_ : Union[str, Any] = max(snake_case , key=lambda snake_case : item[1] )[1] return expected_height, expected_width def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __UpperCamelCase = image_processing_class def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Union[str, Any] = OneFormerImageProcessorTester(self ) @property def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , "image_mean" ) ) self.assertTrue(hasattr(snake_case , "image_std" ) ) self.assertTrue(hasattr(snake_case , "do_normalize" ) ) self.assertTrue(hasattr(snake_case , "do_resize" ) ) self.assertTrue(hasattr(snake_case , "size" ) ) self.assertTrue(hasattr(snake_case , "ignore_index" ) ) self.assertTrue(hasattr(snake_case , "class_info_file" ) ) self.assertTrue(hasattr(snake_case , "num_text" ) ) self.assertTrue(hasattr(snake_case , "repo_path" ) ) self.assertTrue(hasattr(snake_case , "metadata" ) ) self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , 
np.ndarray ) # Test not batched input A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Optional[Any] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Any = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ): '''simple docstring''' A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # prepare image and target A_ : Tuple = self.image_processing_tester.num_labels A_ : str = None A_ : Tuple = None A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) if with_segmentation_maps: A_ : List[str] = num_labels if is_instance_map: A_ : List[str] = list(range(snake_case ) ) * 2 A_ : int = dict(enumerate(snake_case ) ) A_ : List[str] = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": A_ : int = [Image.fromarray(snake_case ) for annotation in annotations] A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , ) return inputs def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' def common(snake_case :Dict=False , snake_case :Optional[int]=None ): A_ : Tuple = self.comm_get_image_processor_inputs( with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case ) A_ : Optional[Any] = inputs["mask_labels"] A_ : List[Any] = inputs["class_labels"] A_ : Optional[Any] = inputs["pixel_values"] A_ : int = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ): self.assertEqual(mask_label.shape[0] , 
class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text ) common() common(is_instance_map=snake_case ) common(is_instance_map=snake_case , segmentation_type="pil" ) common(is_instance_map=snake_case , segmentation_type="pil" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = np.zeros((20, 50) ) A_ : List[str] = 1 A_ : int = 1 A_ : Optional[Any] = 1 A_ : Any = binary_mask_to_rle(snake_case ) self.assertEqual(len(snake_case ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs() A_ : int = fature_extractor.post_process_semantic_segmentation(snake_case ) self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] A_ : List[Any] = fature_extractor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : str = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Tuple = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
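# The RLE test above asserts len(rle) == 4, rle[0] == 21, rle[1] == 45 for a
# (20, 50) zero mask with three short runs of ones. A hedged re-derivation of
# that encoding (alternating 1-indexed run start / run length over the
# row-major flattened mask); the mask slices below are one reconstruction
# consistent with those assertions, not a verbatim copy of the test:
import numpy as np


def rle_pairs(mask: np.ndarray) -> list[int]:
    pixels = np.concatenate([[0], mask.flatten(), [0]])  # pad so every run closes
    changes = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed run starts
    changes[1::2] -= changes[::2]                         # turn run ends into lengths
    return changes.tolist()


m = np.zeros((20, 50), dtype=np.uint8)
m[0, 20:] = 1   # 30 ones at the end of row 0 ...
m[1, :15] = 1   # ... contiguous with 15 ones at the start of row 1: run (21, 45)
m[5, :10] = 1   # second run: (251, 10)
print(rle_pairs(m))  # [21, 45, 251, 10]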
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = 42 class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = True @register_to_config def __init__( self :Dict , snake_case :int = 3 , snake_case :int = 3 , snake_case :Tuple[str] = ("DownEncoderBlock2D",) , snake_case :Tuple[str] = ("UpDecoderBlock2D",) , snake_case :Tuple[int] = (64,) , snake_case :int = 1 , snake_case :str = "silu" , snake_case :int = 4 , snake_case :int = 32 , snake_case :int = 32 , snake_case :float = 0.18215 , ): '''simple docstring''' super().__init__() # pass init params to Encoder A_ : str = Encoder( in_channels=snake_case , out_channels=snake_case , down_block_types=snake_case , block_out_channels=snake_case , layers_per_block=snake_case , act_fn=snake_case , norm_num_groups=snake_case , double_z=snake_case , ) # pass init params to Decoder A_ : Any = Decoder( in_channels=snake_case , out_channels=snake_case , up_block_types=snake_case , block_out_channels=snake_case , layers_per_block=snake_case , norm_num_groups=snake_case , act_fn=snake_case , ) A_ : Optional[int] = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) A_ : Optional[int] = nn.Convad(snake_case , snake_case , 1 ) A_ : List[str] = False A_ : List[str] = False # only relevant if vae tiling is enabled A_ : str = self.config.sample_size A_ : str = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) A_ : int = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) A_ : Union[str, Any] = 0.25 def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Optional[int]=False ): '''simple docstring''' if isinstance(snake_case , (Encoder, Decoder) ): A_ : List[str] = value def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :bool = True ): '''simple docstring''' A_ : Tuple = use_tiling def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' self.enable_tiling(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : List[str] = True def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[int] = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : List[Any] = {} def fn_recursive_add_processors(snake_case :str , snake_case :torch.nn.Module , snake_case :Dict[str, AttentionProcessor] ): if hasattr(snake_case , "set_processor" ): A_ : str = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , snake_case , snake_case ) return processors for name, module in self.named_children(): fn_recursive_add_processors(snake_case , snake_case , snake_case ) return processors def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' A_ : Optional[int] = len(self.attn_processors.keys() ) if isinstance(snake_case , snake_case ) and 
len(snake_case ) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(snake_case )} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(snake_case :str , snake_case :torch.nn.Module , snake_case :Dict ): if hasattr(snake_case , "set_processor" ): if not isinstance(snake_case , snake_case ): module.set_processor(snake_case ) else: module.set_processor(processor.pop(f"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , snake_case , snake_case ) for name, module in self.named_children(): fn_recursive_attn_processor(snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :torch.FloatTensor , snake_case :bool = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(snake_case , return_dict=snake_case ) if self.use_slicing and x.shape[0] > 1: A_ : int = [self.encoder(snake_case ) for x_slice in x.split(1 )] A_ : Union[str, Any] = torch.cat(snake_case ) else: A_ : Union[str, Any] = self.encoder(snake_case ) A_ : List[str] = self.quant_conv(snake_case ) A_ : str = DiagonalGaussianDistribution(snake_case ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :torch.FloatTensor , snake_case :bool = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(snake_case , return_dict=snake_case ) A_ : Optional[Any] = self.post_quant_conv(snake_case ) A_ : Dict = self.decoder(snake_case ) if not return_dict: return (dec,) return DecoderOutput(sample=snake_case ) @apply_forward_hook def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :torch.FloatTensor , snake_case :bool = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: A_ : Optional[int] = [self._decode(snake_case ).sample for z_slice in z.split(1 )] A_ : int = torch.cat(snake_case ) else: A_ : Optional[Any] = self._decode(snake_case ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :int , snake_case :List[str] ): '''simple docstring''' A_ : Tuple = min(a.shape[2] , b.shape[2] , snake_case ) for y in range(snake_case ): A_ : Union[str, Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def SCREAMING_SNAKE_CASE ( self :str , snake_case :List[str] , snake_case :str , snake_case :Optional[Any] ): '''simple docstring''' A_ : List[Any] = min(a.shape[3] , b.shape[3] , snake_case ) for x in range(snake_case ): A_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :bool = True ): '''simple docstring''' A_ : Tuple = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) A_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor ) A_ : Union[str, Any] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode 
them separately. A_ : Optional[Any] = [] for i in range(0 , x.shape[2] , snake_case ): A_ : List[Any] = [] for j in range(0 , x.shape[3] , snake_case ): A_ : Optional[Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] A_ : Optional[Any] = self.encoder(snake_case ) A_ : str = self.quant_conv(snake_case ) row.append(snake_case ) rows.append(snake_case ) A_ : Union[str, Any] = [] for i, row in enumerate(snake_case ): A_ : Optional[int] = [] for j, tile in enumerate(snake_case ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: A_ : int = self.blend_v(rows[i - 1][j] , snake_case , snake_case ) if j > 0: A_ : int = self.blend_h(row[j - 1] , snake_case , snake_case ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(snake_case , dim=3 ) ) A_ : str = torch.cat(snake_case , dim=2 ) A_ : Optional[Any] = DiagonalGaussianDistribution(snake_case ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :torch.FloatTensor , snake_case :bool = True ): '''simple docstring''' A_ : List[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) A_ : str = int(self.tile_sample_min_size * self.tile_overlap_factor ) A_ : Optional[Any] = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. A_ : Dict = [] for i in range(0 , z.shape[2] , snake_case ): A_ : List[Any] = [] for j in range(0 , z.shape[3] , snake_case ): A_ : int = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] A_ : Any = self.post_quant_conv(snake_case ) A_ : Optional[int] = self.decoder(snake_case ) row.append(snake_case ) rows.append(snake_case ) A_ : Optional[Any] = [] for i, row in enumerate(snake_case ): A_ : int = [] for j, tile in enumerate(snake_case ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: A_ : Dict = self.blend_v(rows[i - 1][j] , snake_case , snake_case ) if j > 0: A_ : int = self.blend_h(row[j - 1] , snake_case , snake_case ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(snake_case , dim=3 ) ) A_ : Union[str, Any] = torch.cat(snake_case , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :torch.FloatTensor , snake_case :bool = False , snake_case :bool = True , snake_case :Optional[torch.Generator] = None , ): '''simple docstring''' A_ : Optional[int] = sample A_ : Union[str, Any] = self.encode(snake_case ).latent_dist if sample_posterior: A_ : List[Any] = posterior.sample(generator=snake_case ) else: A_ : List[str] = posterior.mode() A_ : Optional[Any] = self.decode(snake_case ).sample if not return_dict: return (dec,) return DecoderOutput(sample=snake_case )
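# A usage sketch of the tiled VAE above, built with the defaults from the
# signature (a toy single-block config; real checkpoints are deeper). With
# tiling enabled, inputs larger than tile_sample_min_size go through the
# overlapping-tile encode/decode paths and seams are blended by blend_v/blend_h.
import torch
from diffusers import AutoencoderKL  # assuming this file is diffusers' AutoencoderKL

vae = AutoencoderKL()  # defaults: 3 channels in/out, one down/up block, 4 latent channels
vae.enable_tiling()
x = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    z = vae.encode(x).latent_dist.sample()
    recon = vae.decode(z).sample
print(z.shape, recon.shape)  # latents have 4 channels; recon matches x's shape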
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''data2vec-vision''' def __init__( self :int , snake_case :Optional[int]=768 , snake_case :Any=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :Any="gelu" , snake_case :Tuple=0.0 , snake_case :int=0.0 , snake_case :Any=0.02 , snake_case :str=1e-12 , snake_case :List[str]=224 , snake_case :Dict=16 , snake_case :int=3 , snake_case :int=False , snake_case :str=False , snake_case :List[Any]=False , snake_case :Optional[Any]=False , snake_case :Tuple=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Any=True , snake_case :Optional[Any]=[3, 5, 7, 11] , snake_case :Dict=[1, 2, 3, 6] , snake_case :int=True , snake_case :List[Any]=0.4 , snake_case :Any=256 , snake_case :Union[str, Any]=1 , snake_case :Union[str, Any]=False , snake_case :Any=255 , **snake_case :int , ): '''simple docstring''' super().__init__(**snake_case ) A_ : Dict = hidden_size A_ : Tuple = num_hidden_layers A_ : List[str] = num_attention_heads A_ : Any = intermediate_size A_ : Optional[Any] = hidden_act A_ : Any = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Optional[Any] = initializer_range A_ : List[str] = layer_norm_eps A_ : str = image_size A_ : Optional[int] = patch_size A_ : int = num_channels A_ : Optional[Any] = use_mask_token A_ : Optional[Any] = use_absolute_position_embeddings A_ : Optional[int] = use_relative_position_bias A_ : Dict = use_shared_relative_position_bias A_ : Any = layer_scale_init_value A_ : Optional[Any] = drop_path_rate A_ : Dict = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Tuple = out_indices A_ : Optional[Any] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : str = use_auxiliary_head A_ : List[Any] = auxiliary_loss_weight A_ : List[str] = auxiliary_channels A_ : Dict = auxiliary_num_convs A_ : List[str] = auxiliary_concat_input A_ : Optional[int] = semantic_loss_ignore_index class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return 1e-4
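# A quick sketch instantiating the config above with the defaults from its
# signature (the public class name Data2VecVisionConfig is inferred from the
# "data2vec-vision" model_type):
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()
print(config.model_type)                              # data2vec-vision
print(config.hidden_size, config.num_hidden_layers)   # 768 12
print((config.image_size // config.patch_size) ** 2)  # 196 patches at 224 / 16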
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''Wav2Vec2FeatureExtractor''' __UpperCamelCase = '''AutoTokenizer''' def __init__( self :Union[str, Any] , snake_case :Union[str, Any] , snake_case :str ): '''simple docstring''' super().__init__(snake_case , snake_case ) A_ : str = self.feature_extractor A_ : Any = False @classmethod def SCREAMING_SNAKE_CASE ( cls :Optional[Any] , snake_case :Optional[Any] , **snake_case :Union[str, Any] ): '''simple docstring''' try: return super().from_pretrained(snake_case , **snake_case ) except OSError: warnings.warn( f"Loading a tokenizer inside {cls.__name__} from a config that does not" " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , snake_case , ) A_ : str = WavaVecaFeatureExtractor.from_pretrained(snake_case , **snake_case ) A_ : List[Any] = WavaVecaCTCTokenizer.from_pretrained(snake_case , **snake_case ) return cls(feature_extractor=snake_case , tokenizer=snake_case ) def __call__( self :int , *snake_case :Any , **snake_case :int ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*snake_case , **snake_case ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) A_ : Union[str, Any] = kwargs.pop("raw_speech" ) else: A_ : Dict = kwargs.pop("audio" , snake_case ) A_ : List[str] = kwargs.pop("sampling_rate" , snake_case ) A_ : List[str] = kwargs.pop("text" , snake_case ) if len(snake_case ) > 0: A_ : Tuple = args[0] A_ : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if audio is not None: A_ : List[Any] = self.feature_extractor(snake_case , *snake_case , sampling_rate=snake_case , **snake_case ) if text is not None: A_ : Optional[int] = self.tokenizer(snake_case , **snake_case ) if text is None: return inputs elif audio is None: return encodings else: A_ : int = encodings["input_ids"] return inputs def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :List[str] , **snake_case :List[Any] ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*snake_case , **snake_case ) A_ : Optional[Any] = kwargs.pop("input_features" , snake_case ) A_ : Optional[Any] = kwargs.pop("labels" , snake_case ) if len(snake_case ) > 0: A_ : Any = args[0] A_ : Union[str, Any] = args[1:] if input_features is not None: A_ : Optional[int] = self.feature_extractor.pad(snake_case , *snake_case , **snake_case ) if labels is not None: A_ : List[str] = self.tokenizer.pad(snake_case , **snake_case ) if labels is None: return input_features elif input_features is None: return labels else: A_ : Optional[Any] = labels["input_ids"] return input_features def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , *snake_case :List[Any] , **snake_case :Tuple ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :str , *snake_case :Optional[int] , **snake_case :Dict ): '''simple docstring''' return self.tokenizer.decode(*snake_case , **snake_case ) @contextmanager def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) A_ : Any = True A_ : Any = self.tokenizer yield A_ : Dict = self.feature_extractor A_ : int = False
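# A usage sketch of the combined __call__ above: pass audio and text in one
# call instead of the deprecated as_target_processor() context manager, and
# the tokenized text comes back under "labels". Checkpoint name illustrative.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
batch = processor(audio=speech, sampling_rate=16_000, text="HELLO", return_tensors="pt")
print(batch["input_values"].shape, batch["labels"].shape)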
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = ['''input_features''', '''attention_mask'''] def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ): '''simple docstring''' super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case ) A_ : Union[str, Any] = feature_size A_ : int = sampling_rate A_ : str = padding_value A_ : int = hop_length A_ : List[str] = win_length A_ : Any = frame_signal_scale A_ : str = preemphasis_coeff A_ : List[str] = mel_floor A_ : str = normalize_means A_ : Any = normalize_vars A_ : Optional[Any] = win_function A_ : Dict = return_attention_mask A_ : List[str] = win_length * sampling_rate // 1_000 A_ : List[str] = hop_length * sampling_rate // 1_000 A_ : List[str] = optimal_fft_length(self.sample_size ) A_ : str = (self.n_fft // 2) + 1 def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ): '''simple docstring''' if self.win_function == "hamming_window": A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case ) else: A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function ) A_ : Optional[int] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) A_ : Tuple = spectrogram( one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , ) return msfc_features.T def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ): '''simple docstring''' if self.normalize_means: A_ : int = x[:input_length].mean(axis=0 ) A_ : Any = np.subtract(snake_case , snake_case ) if self.normalize_vars: A_ : List[Any] = x[:input_length].std(axis=0 ) A_ : Optional[int] = np.divide(snake_case , snake_case ) if input_length < x.shape[0]: A_ : Optional[int] = padding_value # make sure array is in float32 A_ : Union[str, Any] = x.astype(np.floataa ) return x def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ): '''simple docstring''' A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )] def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case 
:Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) A_ : Optional[Any] = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): A_ : int = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Tuple = [raw_speech] # extract fbank features A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech] # convert into correct format for padding A_ : Union[str, Any] = BatchFeature({"input_features": features} ) A_ : str = self.pad( snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , ) # make sure list is in array format A_ : Optional[int] = padded_inputs.get("input_features" ) if isinstance(input_features[0] , snake_case ): A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features] A_ : Dict = padded_inputs.get("attention_mask" ) if attention_mask is not None: A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: A_ : Dict = ( np.array(snake_case , dtype=np.intaa ) if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) A_ : Optional[int] = self.normalize( padded_inputs["input_features"] , attention_mask=snake_case ) if return_tensors is not None: A_ : Dict = padded_inputs.convert_to_tensors(snake_case ) return padded_inputs
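# The millisecond-to-sample arithmetic in __init__ above, worked through with
# the signature's defaults (16 kHz audio, 25 ms windows, 10 ms hops):
sampling_rate, win_length_ms, hop_length_ms = 16_000, 25, 10
sample_size = win_length_ms * sampling_rate // 1_000    # 400 samples per window
sample_stride = hop_length_ms * sampling_rate // 1_000  # 160 samples between frames
n_fft = 512                                             # optimal_fft_length(400): next power of two
n_freqs = (n_fft // 2) + 1                              # 257 frequency bins per frame
print(sample_size, sample_stride, n_freqs)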
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py _lowerCAmelCase : List[str] = '''\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation", author = "Lin, Chin-Yew and Och, Franz Josef", booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics", month = "aug 23{--}aug 27", year = "2004", address = "Geneva, Switzerland", publisher = "COLING", url = "https://www.aclweb.org/anthology/C04-1072", pages = "501--507", } ''' _lowerCAmelCase : Tuple = '''\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation, the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. ''' _lowerCAmelCase : Union[str, Any] = ''' Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: \'bleu\': bleu score, \'precisions\': geometric mean of n-gram precisions, \'brevity_penalty\': brevity penalty, \'length_ratio\': ratio of lengths, \'translation_length\': translation_length, \'reference_length\': reference_length Examples: >>> predictions = [ ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample ... ] >>> references = [ ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references) ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric("bleu") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results["bleu"]) 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :Optional[Any]=4 , snake_case :List[str]=False ): '''simple docstring''' A_ : str = compute_bleu( reference_corpus=snake_case , translation_corpus=snake_case , max_order=snake_case , smooth=snake_case ) ((A_) , (A_) , (A_) , (A_) , (A_) , (A_)) : str = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
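# A hand-worked sketch of the two BLEU ingredients described above, restricted
# to unigrams (max_order=1) for simplicity; illustrative only, not the
# metric's exact smoothing behavior:
import math

candidate = ["the", "cat", "sat"]
reference = ["the", "cat", "sat", "down"]

p1 = 3 / 3  # unigram precision: all 3 candidate tokens appear in the reference
# brevity penalty: candidate (3 tokens) is shorter than the reference (4)
bp = math.exp(1 - len(reference) / len(candidate))  # exp(-1/3) ≈ 0.717
print(bp * p1)  # unigram BLEU ≈ 0.717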
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' super().__init__() A_ : Tuple = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal." ) A_ : List[Any] = prefix_inner_dim A_ : Union[str, Any] = prefix_hidden_dim A_ : List[str] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = ( nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = GPTaConfig( vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , ) A_ : Optional[Any] = GPTaLMHeadModel(snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ): '''simple docstring''' A_ : Any = self.transformer.transformer.wte(snake_case ) A_ : str = self.encode_prefix(snake_case ) A_ : Union[str, Any] = self.decode_prefix(snake_case ) A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A_ : int = torch.cat((dummy_token, input_ids) , dim=1 ) A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ): '''simple docstring''' return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ): '''simple docstring''' return self.encode_prefix(snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Any = torch.split(snake_case , 1 , dim=0 ) A_ : Optional[int] = [] A_ : Union[str, Any] = [] for feature in features: A_ : Tuple = 
self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature # Only support beam search for now A_ , A_ : Dict = self.generate_beam( input_embeds=snake_case , device=snake_case , eos_token_id=snake_case ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A_ : int = torch.stack(snake_case ) A_ : int = torch.stack(snake_case ) return generated_tokens, generated_seq_lengths @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ): '''simple docstring''' A_ : Optional[Any] = eos_token_id A_ : List[Any] = None A_ : List[Any] = None A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int ) A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool ) if input_embeds is not None: A_ : Any = input_embeds else: A_ : Optional[Any] = self.transformer.transformer.wte(snake_case ) for i in range(snake_case ): A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case ) A_ : str = outputs.logits A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A_ : List[str] = logits.softmax(-1 ).log() if scores is None: A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 ) A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] ) A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A_ : Union[str, Any] = next_tokens else: A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] ) A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 ) else: A_ : List[str] = -float(np.inf ) A_ : List[Any] = 0 A_ : Union[str, Any] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 A_ : Optional[Any] = scores_sum / seq_lengths[:, None] A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 ) A_ : str = next_tokens // scores_sum.shape[1] A_ : Union[str, Any] = seq_lengths[next_tokens_source] A_ : Optional[int] = next_tokens % scores_sum.shape[1] A_ : Tuple = next_tokens.unsqueeze(1 ) A_ : Tuple = tokens[next_tokens_source] A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 ) A_ : Dict = generated[next_tokens_source] A_ : Union[str, Any] = scores_sum_average * seq_lengths A_ : Optional[int] = is_stopped[next_tokens_source] A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 ) A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze() if is_stopped.all(): break A_ : int = scores / seq_lengths A_ : str = scores.argsort(descending=snake_case ) # tokens tensors are already padded to max_seq_length A_ : Dict = [tokens[i] for i in order] A_ : int = torch.stack(snake_case , dim=0 ) A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
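# The length normalization used in generate_beam above, in isolation: beams
# are ranked by (sum of token log-probs) / (sequence length), so longer
# hypotheses are not penalized merely for accumulating more negative terms.
# A toy illustration of why raw sums and normalized averages can disagree:
import torch

scores_sum = torch.tensor([-2.0, -2.4])    # summed log-probs of two beams
seq_lengths = torch.tensor([2.0, 4.0])     # beam 1 is twice as long
print(scores_sum.argmax())                 # tensor(0): beam 0 wins on raw sum
print((scores_sum / seq_lengths).argmax()) # tensor(1): beam 1 wins after normalization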
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_lowerCAmelCase : List[Any] = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase : Tuple = ['''BeitFeatureExtractor''']
    _lowerCAmelCase : Union[str, Any] = ['''BeitImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase : Dict = [
        '''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BeitForImageClassification''',
        '''BeitForMaskedImageModeling''',
        '''BeitForSemanticSegmentation''',
        '''BeitModel''',
        '''BeitPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase : Optional[int] = [
        '''FlaxBeitForImageClassification''',
        '''FlaxBeitForMaskedImageModeling''',
        '''FlaxBeitModel''',
        '''FlaxBeitPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    _lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
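# --------------------------------------------------------------------------
# Example (editor's sketch): what the _LazyModule indirection above buys.
# Importing the package is cheap; heavy submodules are only loaded when an
# attribute is first touched. This assumes the file sits at
# transformers/models/beit/__init__.py, as in upstream transformers.
from transformers import BeitConfig  # resolved lazily through the module above

demo_config = BeitConfig(image_size=224, patch_size=16)
print(demo_config.model_type)  # "beit"
# --------------------------------------------------------------------------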
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


_lowerCAmelCase : Tuple = logging.get_logger(__name__)


class __magic_name__ ( lowerCamelCase__ ):
    """simple docstring"""

    def __init__( self :Union[str, Any] , *snake_case :Tuple , **snake_case :Any ):
        '''simple docstring'''
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead." , snake_case , )
        super().__init__(*snake_case , **snake_case )
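# --------------------------------------------------------------------------
# Example (editor's sketch): constructing the deprecated feature extractor
# should emit a deprecation warning pointing at YolosImageProcessor while
# still returning a working processor object. Illustrative only.
import warnings as _warnings
from transformers import YolosFeatureExtractor

with _warnings.catch_warnings(record=True) as caught:
    _warnings.simplefilter("always")
    extractor = YolosFeatureExtractor()
assert any("YolosImageProcessor" in str(w.message) for w in caught)
# --------------------------------------------------------------------------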
from __future__ import annotations

import unittest

import numpy as np

from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.layoutlm.modeling_tf_layoutlm import (
        TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFLayoutLMForMaskedLM,
        TFLayoutLMForQuestionAnswering,
        TFLayoutLMForSequenceClassification,
        TFLayoutLMForTokenClassification,
        TFLayoutLMModel,
    )


class __magic_name__ :
    """simple docstring"""

    def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ):
        '''simple docstring'''
        A_ : str = parent
        A_ : str = batch_size
        A_ : str = seq_length
        A_ : Any = is_training
        A_ : Any = use_input_mask
        A_ : str = use_token_type_ids
        A_ : Tuple = use_labels
        A_ : Optional[Any] = vocab_size
        A_ : Dict = hidden_size
        A_ : str = num_hidden_layers
        A_ : Dict = num_attention_heads
        A_ : str = intermediate_size
        A_ : int = hidden_act
        A_ : List[Any] = hidden_dropout_prob
        A_ : Dict = attention_probs_dropout_prob
        A_ : Optional[Any] = max_position_embeddings
        A_ : List[Any] = type_vocab_size
        A_ : Any = type_sequence_label_size
        A_ : Dict = initializer_range
        A_ : Any = num_labels
        A_ : Optional[int] = num_choices
        A_ : Optional[Any] = scope
        A_ : Any = range_bbox

    def SCREAMING_SNAKE_CASE ( self :str ):
        '''simple docstring'''
        A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    A_ : str = bbox[i, j, 3]
                    A_ : Union[str, Any] = bbox[i, j, 1]
                    A_ : List[Any] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    A_ : Any = bbox[i, j, 2]
                    A_ : Tuple = bbox[i, j, 0]
                    A_ : int = t
        A_ : int = tf.convert_to_tensor(snake_case )
        A_ : Any = None
        if self.use_input_mask:
            A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
        A_ : str = None
        if self.use_token_type_ids:
            A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        A_ : Dict = None
        A_ : List[Any] = None
        A_ : List[str] = None
        if self.use_labels:
            A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A_ : str = ids_tensor([self.batch_size] , self.num_choices )
        A_ : int = LayoutLMConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ):
        '''simple docstring'''
        A_ : Any = TFLayoutLMModel(config=snake_case )
        A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        A_ : str = model(snake_case , snake_case , token_type_ids=snake_case )
        A_ : List[Any] = model(snake_case , snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ):
        '''simple docstring'''
        A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case )
        A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ):
        '''simple docstring'''
        A_ : Union[str, Any] = self.num_labels
        A_ : int = TFLayoutLMForSequenceClassification(config=snake_case )
        A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ):
        '''simple docstring'''
        A_ : List[Any] = self.num_labels
        A_ : str = TFLayoutLMForTokenClassification(config=snake_case )
        A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ):
        '''simple docstring'''
        A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case )
        A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        A_ : int = self.prepare_config_and_inputs()
        ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs
        A_ : Optional[Any] = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """simple docstring"""

    __UpperCamelCase = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    __UpperCamelCase = (
        {
            '''feature-extraction''': TFLayoutLMModel,
            '''fill-mask''': TFLayoutLMForMaskedLM,
            '''text-classification''': TFLayoutLMForSequenceClassification,
            '''token-classification''': TFLayoutLMForTokenClassification,
            '''zero-shot''': TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __UpperCamelCase = False
    __UpperCamelCase = True
    __UpperCamelCase = 10

    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        A_ : Tuple = TFLayoutLMModelTester(self )
        A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )

    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''simple docstring'''
        A_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
        '''simple docstring'''
        A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''simple docstring'''
        A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''simple docstring'''
        A_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''simple docstring'''
        A_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*snake_case )

    @slow
    def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
        '''simple docstring'''
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )

    @unittest.skip("Onnx compliancy broke with TF 2.10" )
    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        pass


def __snake_case ( ) -> Optional[Any]:
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    A_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class __magic_name__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''simple docstring'''
        A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
        A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs()
        # forward pass
        A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        # test the sequence output on [0, :3, :3]
        A_ : List[Any] = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) )
        # test the pooled output on [1, :3]
        A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self :List[str] ):
        '''simple docstring'''
        A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
        A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs()
        # forward pass
        A_ : Dict = model(
            input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
        # test whether we get a loss as a scalar
        A_ : List[str] = outputs.loss
        A_ : Union[str, Any] = (2,)
        self.assertEqual(loss.shape , snake_case )
        # test the shape of the logits
        A_ : Tuple = outputs.logits
        A_ : Tuple = (2, 2)
        self.assertEqual(logits.shape , snake_case )

    @slow
    def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
        '''simple docstring'''
        A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
        A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs()
        # forward pass
        A_ : Union[str, Any] = model(
            input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        # test the shape of the logits
        A_ : Dict = outputs.logits
        A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape , snake_case )

    @slow
    def SCREAMING_SNAKE_CASE ( self :List[str] ):
        '''simple docstring'''
        A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
        A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs()
        # forward pass
        A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        # test the shape of the logits
        A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape , snake_case )
        self.assertEqual(outputs.end_logits.shape , snake_case )
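# --------------------------------------------------------------------------
# Example (editor's sketch): building a single LayoutLM example by hand,
# echoing the batch constructed in prepare_layoutlm_batch_inputs above --
# token ids plus one (x0, y0, x1, y1) box per token, normalized to a 0-1000
# grid. Token ids and boxes below are illustrative.
import tensorflow as _tf

demo_input_ids = _tf.constant([[101, 2023, 2003, 102]])   # [CLS] this is [SEP]
demo_bbox = _tf.constant([[[0, 0, 0, 0],                  # special tokens get zero boxes
                           [48, 84, 96, 100],
                           [100, 84, 140, 100],
                           [1000, 1000, 1000, 1000]]])
demo_attention_mask = _tf.ones_like(demo_input_ids)
demo_token_type_ids = _tf.zeros_like(demo_input_ids)
# These four tensors match the inputs_dict keys returned by the tester above.
# --------------------------------------------------------------------------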
from __future__ import annotations


def __snake_case ( nums : list[float] ) -> bool:
    if len(nums ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in nums ):
        raise ValueError("All values must be greater than 0" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
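# --------------------------------------------------------------------------
# Example (editor's sketch): the check above is the generalized triangle
# inequality -- the longest side must be strictly shorter than the sum of
# all the others.
assert __snake_case([6.0, 10.0, 15.0]) is True   # 15 < 6 + 10
assert __snake_case([3.0, 4.0, 9.0]) is False    # 9 >= 3 + 4
# --------------------------------------------------------------------------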
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>

Assistant: '''

DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}


def __snake_case ( prompt_or_repo_id : str , agent_name : str , mode : str = "run" ) -> str:
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(prompt_file , "r" , encoding="utf-8" ) as f:
        return f.read()
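# --------------------------------------------------------------------------
# Example (editor's sketch): anything containing whitespace is treated as a
# literal prompt and returned unchanged, so this runs without any network
# access; a bare repo id would instead fetch the template file from the hub.
assert __snake_case("Translate <<task>> into Spanish" , "MyAgent" ) == "Translate <<task>> into Spanish"
# --------------------------------------------------------------------------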
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


_lowerCAmelCase : Any = logging.get_logger(__name__)  # pylint: disable=invalid-name


class __magic_name__ ( lowerCamelCase__ ):
    """simple docstring"""

    def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , )

    def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            A_ : int = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case )

    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        self.enable_attention_slicing(snake_case )

    @torch.no_grad()
    def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ):
        '''simple docstring'''
        if isinstance(snake_case , snake_case ):
            A_ : Dict = 1
        elif isinstance(snake_case , snake_case ):
            A_ : Optional[Any] = len(snake_case )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(snake_case )}." )

        # get prompt text embeddings
        A_ : int = self.tokenizer(
            snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        A_ : Dict = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        A_ , A_ , A_ : int = text_embeddings.shape
        A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 )
        A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        A_ : Dict = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            A_ : List[str]
            if negative_prompt is None:
                A_ : List[str] = [""]
            elif type(snake_case ) is not type(snake_case ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !="
                    f" {type(snake_case )}." )
            elif isinstance(snake_case , snake_case ):
                A_ : Optional[Any] = [negative_prompt]
            elif batch_size != len(snake_case ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                A_ : Any = negative_prompt

            A_ : Optional[int] = text_input_ids.shape[-1]
            A_ : Dict = self.tokenizer(
                snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , )
            A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            A_ : Tuple = uncond_embeddings.shape[1]
            A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 )
            A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        A_ : List[Any] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                A_ : Tuple = torch.randn(
                    snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device )
                A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(
                    self.device )
            else:
                A_ : int = torch.randn(
                    snake_case , generator=snake_case , device=self.device , dtype=snake_case )
                A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            A_ : Tuple = latents_reference.to(self.device )
            A_ : Any = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
        A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
        A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        A_ : Optional[Any] = 0 if dx < 0 else dx
        A_ : Optional[Any] = 0 if dy < 0 else dy
        A_ : List[str] = max(-dx , 0 )
        A_ : List[Any] = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(snake_case )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        A_ : str = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        A_ : List[str] = {}
        if accepts_eta:
            A_ : Union[str, Any] = eta

        for i, t in enumerate(self.progress_bar(snake_case ) ):
            # expand the latents if we are doing classifier free guidance
            A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case )

            # predict the noise residual
            A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample

            # perform guidance
            if do_classifier_free_guidance:
                A_ , A_ : Dict = noise_pred.chunk(2 )
                A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(snake_case , snake_case , snake_case )

        A_ : List[str] = 1 / 0.18215 * latents
        A_ : Tuple = self.vae.decode(snake_case ).sample

        A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if self.safety_checker is not None:
            A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to(
                self.device )
            A_ , A_ : List[str] = self.safety_checker(
                images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            A_ : List[str] = None

        if output_type == "pil":
            A_ : Optional[int] = self.numpy_to_pil(snake_case )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
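# --------------------------------------------------------------------------
# Example (editor's sketch): the reference-latents alignment performed above,
# reconstructed as a self-contained snippet. A reference latent grid is
# center-embedded into (or center-cropped to) the target latent shape so two
# renders from the same seed share low-frequency structure. All shapes are
# illustrative assumptions.
import torch as _torch

ref = _torch.randn(1, 4, 64, 64)    # latents from a 512x512 render
target_h, target_w = 96, 96         # latent grid for a 768x768 render
dx = (target_w - ref.shape[3]) // 2
dy = (target_h - ref.shape[2]) // 2
w = ref.shape[3] if dx >= 0 else ref.shape[3] + 2 * dx
h = ref.shape[2] if dy >= 0 else ref.shape[2] + 2 * dy
aligned = _torch.randn(1, 4, target_h, target_w)
aligned[:, :, max(dy, 0) : max(dy, 0) + h, max(dx, 0) : max(dx, 0) + w] = ref[
    :, :, max(-dy, 0) : max(-dy, 0) + h, max(-dx, 0) : max(-dx, 0) + w
]
# --------------------------------------------------------------------------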
from .constants import (
    MODEL_NAME,
    OPTIMIZER_NAME,
    RNG_STATE_NAME,
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
    SCALER_NAME,
    SCHEDULER_NAME,
    TORCH_LAUNCH_PARAMS,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
)
from .dataclasses import (
    BnbQuantizationConfig,
    ComputeEnvironment,
    CustomDtype,
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    DynamoBackend,
    FPaRecipeKwargs,
    FullyShardedDataParallelPlugin,
    GradientAccumulationPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    KwargsHandler,
    LoggerType,
    MegatronLMPlugin,
    PrecisionType,
    ProjectConfiguration,
    RNGType,
    SageMakerDistributedType,
    TensorInformation,
    TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
    get_ccl_version,
    is_abit_bnb_available,
    is_aim_available,
    is_bfaa_available,
    is_bnb_available,
    is_botoa_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fpa_available,
    is_ipex_available,
    is_megatron_lm_available,
    is_mlflow_available,
    is_mps_available,
    is_npu_available,
    is_rich_available,
    is_safetensors_available,
    is_sagemaker_available,
    is_tensorboard_available,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)
from .modeling import (
    check_device_map,
    check_tied_parameters_in_config,
    check_tied_parameters_on_same_device,
    compute_module_sizes,
    convert_file_size_to_int,
    dtype_byte_size,
    find_tied_parameters,
    get_balanced_memory,
    get_max_layer_size,
    get_max_memory,
    get_mixed_precision_context_manager,
    id_tensor_storage,
    infer_auto_device_map,
    load_checkpoint_in_model,
    load_offloaded_weights,
    load_state_dict,
    named_module_tensors,
    retie_parameters,
    set_module_tensor_to_device,
    shard_checkpoint,
)
from .offload import (
    OffloadedWeightsLoader,
    PrefixedDataset,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
    save_offload_index,
)
from .operations import (
    broadcast,
    broadcast_object_list,
    concatenate,
    convert_outputs_to_fpaa,
    convert_to_fpaa,
    find_batch_size,
    find_device,
    gather,
    gather_object,
    get_data_structure,
    honor_type,
    initialize_tensors,
    is_namedtuple,
    is_tensor_information,
    is_torch_tensor,
    listify,
    pad_across_processes,
    recursively_apply,
    reduce,
    send_to_device,
    slice_tensors,
)
from .versions import compare_versions, is_torch_version


if is_deepspeed_available():
    from .deepspeed import (
        DeepSpeedEngineWrapper,
        DeepSpeedOptimizerWrapper,
        DeepSpeedSchedulerWrapper,
        DummyOptim,
        DummyScheduler,
        HfDeepSpeedConfig,
    )

from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
    PrepareForLaunch,
    _filter_args,
    prepare_deepspeed_cmd_env,
    prepare_multi_gpu_env,
    prepare_sagemager_args_inputs,
    prepare_simple_launcher_cmd_env,
    prepare_tpu,
)
from .megatron_lm import (
    AbstractTrainStep,
    BertTrainStep,
    GPTTrainStep,
    MegatronEngine,
    MegatronLMDummyDataLoader,
    MegatronLMDummyScheduler,
    MegatronLMOptimizerWrapper,
    MegatronLMSchedulerWrapper,
    TaTrainStep,
    avg_losses_across_data_parallel_group,
    gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
    extract_model_from_parallel,
    get_pretty_name,
    is_port_in_use,
    merge_dicts,
    patch_environment,
    save,
    wait_for_everyone,
    write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
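# --------------------------------------------------------------------------
# Example (editor's sketch): two helpers re-exported above in use.
# patch_environment temporarily overrides environment variables and restores
# them on exit; set_seed seeds all RNG sources at once. The variable name
# MY_FLAG is illustrative.
import os as _os
from accelerate.utils import patch_environment, set_seed

set_seed(42)
with patch_environment(MY_FLAG="1"):
    assert _os.environ["MY_FLAG"] == "1"
assert "MY_FLAG" not in _os.environ
# --------------------------------------------------------------------------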
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


_lowerCAmelCase : List[str] = logging.get_logger(__name__)


def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict:
    A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase )
    A_ : List[str] = nn.functional.normalize(_lowerCAmelCase )
    return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() )


class __magic_name__ ( lowerCamelCase__ ):
    """simple docstring"""

    __UpperCamelCase = CLIPConfig
    __UpperCamelCase = ['''CLIPEncoderLayer''']

    def __init__( self :int , snake_case :CLIPConfig ):
        '''simple docstring'''
        super().__init__(snake_case )
        A_ : int = CLIPVisionModel(config.vision_config )
        A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case )
        A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case )
        A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case )
        A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case )
        A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case )

    @torch.no_grad()
    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ):
        '''simple docstring'''
        A_ : List[Any] = self.vision_model(snake_case )[1]  # pooled_output
        A_ : List[Any] = self.visual_projection(snake_case )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy()
        A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy()
        A_ : Union[str, Any] = []
        A_ : Any = image_embeds.shape[0]
        for i in range(snake_case ):
            A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            A_ : Optional[Any] = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                A_ : Optional[Any] = special_cos_dist[i][concept_idx]
                A_ : Tuple = self.special_care_embeds_weights[concept_idx].item()
                A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
                    A_ : Any = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                A_ : Tuple = cos_dist[i][concept_idx]
                A_ : Tuple = self.concept_embeds_weights[concept_idx].item()
                A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(snake_case )
            result.append(snake_case )
        A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ):
        '''simple docstring'''
        A_ : List[str] = self.vision_model(snake_case )[1]  # pooled_output
        A_ : int = self.visual_projection(snake_case )
        A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds )
        A_ : Tuple = cosine_distance(snake_case , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        A_ : Optional[Any] = 0.0
        A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
        A_ : Optional[Any] = special_care * 0.01
        A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
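# --------------------------------------------------------------------------
# Example (editor's sketch): the screening above reduces to L2-normalizing
# both embedding sets and taking a matrix of cosine similarities, which is
# then compared against per-concept thresholds. A toy reproduction of the
# cosine_distance helper, with illustrative shapes:
import torch as _torch
import torch.nn as _nn

demo_image_embeds = _torch.randn(2, 8)
demo_concept_embeds = _torch.randn(3, 8)
sims = _torch.mm(
    _nn.functional.normalize(demo_image_embeds),
    _nn.functional.normalize(demo_concept_embeds).t(),
)
print(sims.shape)  # (2, 3): one score per (image, concept) pair
# --------------------------------------------------------------------------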
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


_lowerCAmelCase : List[str] = False
try:
    _lowerCAmelCase : List[Any] = _is_package_available('''google.colab''')
except ModuleNotFoundError:
    pass


@input.register
class __magic_name__ :
    """simple docstring"""

    def __init__( self :Union[str, Any] , snake_case :str = None , snake_case :list = [] ):
        '''simple docstring'''
        A_ : str = 0
        A_ : int = choices
        A_ : Dict = prompt
        if sys.platform == "win32":
            A_ : Optional[int] = "*"
        else:
            A_ : Tuple = "➔ "

    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[Any] , snake_case :str = "" ):
        '''simple docstring'''
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , snake_case )
        else:
            forceWrite(self.choices[index] , snake_case )

    def SCREAMING_SNAKE_CASE ( self :Any , snake_case :int ):
        '''simple docstring'''
        if index == self.position:
            forceWrite(f" {self.arrow_char} " )
            self.write_choice(snake_case )
        else:
            forceWrite(f" {self.choices[index]}" )
        reset_cursor()

    def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Direction , snake_case :int = 1 ):
        '''simple docstring'''
        A_ : int = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(snake_case )
        move_cursor(snake_case , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP["up"] )
    def SCREAMING_SNAKE_CASE ( self :int ):
        '''simple docstring'''
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP["down"] )
    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''simple docstring'''
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP["newline"] )
    def SCREAMING_SNAKE_CASE ( self :List[str] ):
        '''simple docstring'''
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        return self.position

    @input.mark(KEYMAP["interrupt"] )
    def SCREAMING_SNAKE_CASE ( self :str ):
        '''simple docstring'''
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(snake_case )] for number in range(10 )] )
    def SCREAMING_SNAKE_CASE ( self :str ):
        '''simple docstring'''
        A_ : Union[str, Any] = int(chr(self.current_selection ) )
        A_ : Dict = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , snake_case )
            else:
                return
        else:
            return

    def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :int = 0 ):
        '''simple docstring'''
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        A_ : Optional[int] = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(snake_case )
            forceWrite("\n" )
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        A_ : str = int(builtins.input() )
                    except ValueError:
                        A_ : Dict = default_choice
                else:
                    A_ : Tuple = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(snake_case , "\n" )
                    return choice
import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)


def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
    A_ : Tuple = []
    for i in range(encoder_config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ] )
    return rename_keys


def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict:
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )
        A_ : List[Any] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        A_ : Optional[Any] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        A_ : Optional[Any] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any:
    A_ : Dict = dct.pop(_lowerCAmelCase )
    A_ : List[Any] = val


def __snake_case ( _lowerCAmelCase : List[str] ) -> int:
    if "handwritten" in checkpoint_url:
        A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" )
    return im


@torch.no_grad()
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
    A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase )
    A_ : Tuple = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        A_ : Tuple = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        A_ : Optional[Any] = 1024
        A_ : Union[str, Any] = 4096
        A_ : Union[str, Any] = 24
        A_ : List[Any] = 16
        A_ : List[str] = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        A_ : Dict = False
        A_ : int = "relu"
        A_ : Optional[int] = 1024
        A_ : Any = True
        A_ : List[Any] = False
        A_ : Optional[int] = False

    # load HuggingFace model
    A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase )
    A_ : str = TrOCRForCausalLM(_lowerCAmelCase )
    A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
    model.eval()

    # load state_dict of original model, rename some keys
    A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"]

    A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
    for src, dest in rename_keys:
        rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        A_ : Dict = state_dict.pop(_lowerCAmelCase )
        if key.startswith("decoder" ) and "output_projection" not in key:
            A_ : List[str] = val
        else:
            A_ : Optional[Any] = val

    # load state dict
    model.load_state_dict(_lowerCAmelCase )

    # Check outputs on an image
    A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size )
    A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" )
    A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase )

    A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values

    # verify logits
    A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
    A_ : Tuple = outputs.logits

    A_ : Union[str, Any] = torch.Size([1, 1, 50265] )
    if "trocr-base-handwritten" in checkpoint_url:
        A_ : Union[str, Any] = torch.tensor(
            [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
    elif "trocr-large-handwritten" in checkpoint_url:
        A_ : str = torch.tensor(
            [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
    elif "trocr-base-printed" in checkpoint_url:
        A_ : Optional[Any] = torch.tensor(
            [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
    elif "trocr-large-printed" in checkpoint_url:
        A_ : Optional[int] = torch.tensor(
            [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"

    Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(_lowerCAmelCase )
    print(f"Saving processor to {pytorch_dump_folder_path}" )
    processor.save_pretrained(_lowerCAmelCase )


if __name__ == "__main__":
    _lowerCAmelCase : Dict = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
        type=str,
        help='''URL to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    _lowerCAmelCase : List[str] = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
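# --------------------------------------------------------------------------
# Example invocation (editor's sketch; the script filename is illustrative,
# the flags are exactly the ones defined by the argparse block above):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten
# --------------------------------------------------------------------------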
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)


class __magic_name__ ( lowerCamelCase__ ):
    """simple docstring"""

    def __init__( self :Optional[int] , *snake_case :str , **snake_case :Union[str, Any] ):
        '''simple docstring'''
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead." , snake_case , )
        super().__init__(*snake_case , **snake_case )
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class __magic_name__ ( lowerCamelCase__ ):
    """simple docstring"""

    __UpperCamelCase = 42
    __UpperCamelCase = 42


class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ):
    """simple docstring"""

    __UpperCamelCase = 1

    @register_to_config
    def __init__( self :Union[str, Any] , snake_case :int = 2_000 , snake_case :float = 0.15 , snake_case :float = 0.01 , snake_case :float = 1348.0 , snake_case :float = 1e-5 , snake_case :int = 1 , ):
        '''simple docstring'''
        A_ : Dict = sigma_max
        # setable values
        A_ : List[Any] = None
        self.set_sigmas(snake_case , snake_case , snake_case , snake_case )

    def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :Optional[int] = None ):
        '''simple docstring'''
        return sample

    def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :float = None , snake_case :Union[str, torch.device] = None ):
        '''simple docstring'''
        A_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        A_ : Tuple = torch.linspace(1 , snake_case , snake_case , device=snake_case )

    def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int , snake_case :float = None , snake_case :float = None , snake_case :float = None ):
        '''simple docstring'''
        A_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
        A_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max
        A_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(snake_case , snake_case )
        A_ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        A_ : Any = torch.exp(torch.linspace(math.log(snake_case ) , math.log(snake_case ) , snake_case ) )
        A_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Dict ):
        '''simple docstring'''
        return torch.where(
            timesteps == 0 ,
            torch.zeros_like(t.to(timesteps.device ) ) ,
            self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,
        )

    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :int , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        A_ : int = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        A_ : Optional[Any] = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        A_ : Dict = timesteps.to(self.discrete_sigmas.device )
        A_ : Optional[int] = self.discrete_sigmas[timesteps].to(sample.device )
        A_ : int = self.get_adjacent_sigma(snake_case , snake_case ).to(sample.device )
        A_ : Union[str, Any] = torch.zeros_like(snake_case )
        A_ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        A_ : Optional[int] = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            A_ : Tuple = diffusion.unsqueeze(-1 )
        A_ : Optional[Any] = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        A_ : List[Any] = randn_tensor(
            sample.shape , layout=sample.layout , generator=snake_case , device=sample.device , dtype=sample.dtype )
        A_ : List[Any] = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        A_ : Any = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=snake_case , prev_sample_mean=snake_case )

    def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        A_ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        A_ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        A_ : List[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        A_ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        A_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        A_ : int = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            A_ : str = step_size.unsqueeze(-1 )
        A_ : Optional[Any] = sample + step_size * model_output
        A_ : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=snake_case )

    def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , ):
        '''simple docstring'''
        A_ : Union[str, Any] = timesteps.to(original_samples.device )
        A_ : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
        A_ : List[Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(snake_case ) * sigmas[:, None, None, None]
        )
        A_ : Optional[int] = noise + original_samples
        return noisy_samples

    def __len__( self :Union[str, Any] ):
        '''simple docstring'''
        return self.config.num_train_timesteps
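# --------------------------------------------------------------------------
# Example (editor's sketch): the predictor-corrector loop this scheduler
# supports. set_timesteps and set_sigmas appear verbatim in the call sites
# above; step_pred / step_correct are the upstream diffusers names for the
# two step methods (ScoreSdeVeScheduler), assumed here. The zero tensor
# stands in for a trained score model.
import torch as _torch
from diffusers import ScoreSdeVeScheduler

sde_scheduler = ScoreSdeVeScheduler(num_train_timesteps=1_000)
sde_scheduler.set_timesteps(num_inference_steps=20)
sde_scheduler.set_sigmas(num_inference_steps=20)

sde_sample = _torch.randn(1, 3, 32, 32) * sde_scheduler.config.sigma_max
for t in sde_scheduler.timesteps:
    score = _torch.zeros_like(sde_sample)                                # stand-in model output
    sde_sample = sde_scheduler.step_correct(score, sde_sample).prev_sample
    sde_sample = sde_scheduler.step_pred(score, t, sde_sample).prev_sample
# --------------------------------------------------------------------------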
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Tuple = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, '''constant''': get_constant_schedule, '''constant_w_warmup''': get_constant_schedule_with_warmup, } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :Optional[int] , snake_case :Dict=None , snake_case :str=None , *snake_case :Tuple , **snake_case :Union[str, Any] ): '''simple docstring''' super().__init__(*snake_case , **snake_case ) if config is None: assert isinstance(self.model , snake_case ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f" {self.model.__class__}" ) A_ : List[Any] = self.model.config else: A_ : Optional[Any] = config A_ : str = data_args A_ : Tuple = self.config.tgt_vocab_size if isinstance(self.config , snake_case ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for" " padding.." 
) if self.args.label_smoothing == 0: A_ : Union[str, Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss A_ : List[Any] = label_smoothed_nll_loss def SCREAMING_SNAKE_CASE ( self :str , snake_case :int ): '''simple docstring''' if self.optimizer is None: A_ : str = ["bias", "LayerNorm.weight"] A_ : Dict = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] A_ : List[Any] = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: A_ : Any = Adafactor A_ : Optional[Any] = {"scale_parameter": False, "relative_step": False} else: A_ : int = AdamW A_ : Optional[int] = { "betas": (self.args.adam_betaa, self.args.adam_betaa), "eps": self.args.adam_epsilon, } A_ : Dict = self.args.learning_rate if self.sharded_ddp: A_ : str = OSS( params=snake_case , optim=snake_case , **snake_case , ) else: A_ : Optional[int] = optimizer_cls(snake_case , **snake_case ) if self.lr_scheduler is None: A_ : Optional[int] = self._get_lr_scheduler(snake_case ) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :List[Any] ): '''simple docstring''' A_ : str = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": A_ : Union[str, Any] = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": A_ : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: A_ : str = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case ) return scheduler def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :int , snake_case :List[Any] , snake_case :Optional[int] ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token A_ : Union[str, Any] = model(**snake_case , use_cache=snake_case )[0] A_ : Any = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models A_ , A_ : int = model(**snake_case , labels=snake_case , use_cache=snake_case )[:2] else: # compute label smoothed loss A_ : Dict = model(**snake_case , use_cache=snake_case )[0] A_ : Any = torch.nn.functional.log_softmax(snake_case , dim=-1 ) A_ , A_ : List[str] = self.loss_fn(snake_case , snake_case , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :int , snake_case :Dict ): '''simple docstring''' A_ : List[Any] = inputs.pop("labels" ) A_ , A_ : Dict = self._compute_loss(snake_case , snake_case , snake_case ) return 
loss def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :nn.Module , snake_case :Dict[str, Union[torch.Tensor, Any]] , snake_case :bool , snake_case :Optional[List[str]] = None , ): '''simple docstring''' A_ : Optional[Any] = self._prepare_inputs(snake_case ) A_ : Optional[int] = { "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: A_ : Any = self.model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **snake_case , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: A_ : List[Any] = self._pad_tensors_to_max_len(snake_case , gen_kwargs["max_length"] ) A_ : Tuple = inputs.pop("labels" ) with torch.no_grad(): # compute loss on predict data A_ , A_ : Dict = self._compute_loss(snake_case , snake_case , snake_case ) A_ : Union[str, Any] = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) A_ : Tuple = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: A_ : Optional[int] = self._pad_tensors_to_max_len(snake_case , gen_kwargs["max_length"] ) return (loss, logits, labels) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Tuple , snake_case :Dict ): '''simple docstring''' A_ : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be" f" padded to `max_length`={max_length}" ) A_ : Any = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) A_ : List[str] = tensor return padded_tensor
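# The `label_smoothed_nll_loss` imported dynamically above is not shown in this
# file; the following is a hedged sketch matching its call site (log-probs,
# labels, epsilon, ignore_index) and returning `(loss, nll_loss)`. It assumes
# `ignore_index` is a valid vocabulary id such as the pad token, which the
# trainer above checks for.
import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=0):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss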
300
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of logarithmic function
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Find root of exponential function
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
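# The string/eval/sympy machinery above is convenient for one-off experiments
# but not required; a plain callable-based Newton-Raphson (names here are
# illustrative) does the same job without `eval`:
def newton_raphson_fn(f, f_prime, x0: float, precision: float = 1e-10, max_iter: int = 100) -> float:
    x = x0
    for _ in range(max_iter):
        fx = f(x)
        if abs(fx) < precision:
            return x
        x -= fx / f_prime(x)
    raise ArithmeticError(f"no convergence after {max_iter} iterations")

print(newton_raphson_fn(lambda x: x**2 - 5, lambda x: 2 * x, 2.0))  # ~2.2360679..., i.e. sqrt(5)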
300
1
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''): _lowerCAmelCase : str = True from torch.cuda.amp import autocast _lowerCAmelCase : Any = logging.getLogger(__name__) @dataclass class __magic_name__ : """simple docstring""" __UpperCamelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) __UpperCamelCase = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) __UpperCamelCase = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) __UpperCamelCase = field( default=0.99_9995 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def __snake_case ( _lowerCAmelCase : ModelArguments , _lowerCAmelCase : TrainingArguments ) -> List[str]: logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) A_ : Dict = logging.WARNING if model_args.verbose_logging: A_ : str = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): A_ : Union[str, Any] = logging.INFO logger.setLevel(_lowerCAmelCase ) @dataclass class __magic_name__ : """simple docstring""" __UpperCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCamelCase = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) __UpperCamelCase = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) __UpperCamelCase = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) __UpperCamelCase = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) __UpperCamelCase = field( default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class __magic_name__ : """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = "longest" __UpperCamelCase = None __UpperCamelCase = None def __call__( self :Optional[Any] , snake_case :List[Dict[str, Union[List[int], torch.Tensor]]] ): '''simple docstring''' A_ : str = self.feature_extractor.pad( snake_case , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) A_ : Union[str, Any] = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] ) A_ : str = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula A_ : Dict = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to( torch.long ) A_ : Optional[int] = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device ) # these two operations makes sure that all values # before the output lengths indices are attended to A_ : Union[str, Any] = 1 A_ : str = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices A_ : Dict = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=snake_case , min_masks=2 , ) return batch class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :List[Any] , *snake_case :Optional[Any] , snake_case :Union[str, Any]=1 , snake_case :Dict=0 , snake_case :List[str]=1.0 , **snake_case :Union[str, Any] ): '''simple docstring''' super().__init__(*snake_case , **snake_case ) A_ : int = 0 A_ : List[Any] = max_gumbel_temp A_ : str = min_gumbel_temp A_ : Optional[Any] = gumbel_temp_decay def SCREAMING_SNAKE_CASE ( self :int , snake_case :nn.Module , snake_case :Dict[str, Union[torch.Tensor, Any]] ): '''simple docstring''' model.train() A_ : str = self._prepare_inputs(snake_case ) if self.use_amp: with autocast(): A_ : List[str] = self.compute_loss(snake_case , snake_case ) else: A_ : Dict = self.compute_loss(snake_case , snake_case ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": A_ : Optional[int] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": A_ : Optional[Any] = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']" ) if self.args.gradient_accumulation_steps > 1: A_ : int = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(snake_case ).backward() elif self.use_apex: with amp.scale_loss(snake_case , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(snake_case ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def __snake_case ( ) -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A_ , A_ , A_ : int = parser.parse_args_into_dataclasses() configure_logger(_lowerCAmelCase , _lowerCAmelCase ) # Downloading and loading a dataset from the hub. A_ : Union[str, Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" A_ : Union[str, Any] = DatasetDict() A_ : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , ) A_ : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" A_ : Tuple = DatasetDict() A_ : Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , ) A_ : Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported A_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCAmelCase ) def prepare_dataset(_lowerCAmelCase : List[Any] ): # check that all files have the correct sampling rate A_ , A_ : List[Any] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays A_ : Any = datasets.map( _lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names ) # filter audio files that are too long A_ : Dict = vectorized_datasets.filter( lambda _lowerCAmelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(_lowerCAmelCase : int ): return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` A_ : Optional[int] = vectorized_datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , 
remove_columns=vectorized_datasets["train"].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 A_ : Any = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'" ) A_ : Union[str, Any] = WavaVecaForPreTraining(_lowerCAmelCase ) A_ : str = DataCollatorForWavaVecaPretraining(model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ) A_ : Optional[Any] = WavaVecaPreTrainer( model=_lowerCAmelCase , data_collator=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
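# The gumbel temperature annealing performed inside the trainer above reduces
# to an exponential decay with a floor; spelled out standalone using the
# default hyperparameters from ModelArguments:
def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    return max(max_temp * decay**step, min_temp)

for step in (0, 100_000, 1_000_000):
    print(step, round(gumbel_temperature(step), 4))  # 2.0 at step 0, floored at 0.5 eventually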
300
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets _lowerCAmelCase : List[Any] = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' _lowerCAmelCase : Union[str, Any] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' _lowerCAmelCase : Optional[Any] = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :List[Any] , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' A_ : List[str] = len(references[0] ) if any(len(snake_case ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A_ : int = [[refs[i] for refs in references] for i in range(snake_case )] A_ : Optional[Any] = TER( normalized=snake_case , no_punct=snake_case , asian_support=snake_case , case_sensitive=snake_case , ) A_ : List[Any] = sb_ter.corpus_score(snake_case , snake_case ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
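# For a quick sanity check outside the `datasets` wrapper, sacrebleu's TER can
# be called directly; note that it organises references as one stream per
# reference (the transpose of the per-prediction layout accepted above):
from sacrebleu import TER

preds = ["does this sentence match??", "what about this sentence?"]
ref_streams = [
    ["does this sentence match", "wHaT aBoUt ThIs SeNtEnCe?"],     # first reference stream
    ["does this sentence match!?!", "wHaT aBoUt ThIs SeNtEnCe?"],  # second reference stream
]
print(TER(case_sensitive=True).corpus_score(preds, ref_streams))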
300
1
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
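# A classical full adder, useful for cross-checking the measured counts of the
# quantum circuit above on the non-superposed (0/1) inputs:
def classical_full_adder(a: int, b: int, carry_in: int) -> tuple:
    total = a + b + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)

for bits in ((0, 0, 0), (1, 0, 1), (1, 1, 1)):
    print(bits, "->", classical_full_adder(*bits))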
300
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
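# An O(n^2) brute-force reference, handy for property-testing the
# divide-and-conquer implementation above on random point sets:
from itertools import combinations

def closest_pair_brute_force(pts) -> float:
    return min(
        ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2) ** 0.5
        for p, q in combinations(pts, 2)
    )

pts = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print(closest_pair_brute_force(pts))  # should equal closest_pair_of_points(pts, len(pts))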
300
1
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
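# The estimators above report a single point estimate; the Monte Carlo standard
# error shrinks as 1/sqrt(n), which a small repeated-sampling experiment makes
# visible (sample sizes here are arbitrary):
from random import uniform
from statistics import mean, stdev

def one_pi_estimate(n: int) -> float:
    return 4 * mean(uniform(-1, 1) ** 2 + uniform(-1, 1) ** 2 <= 1 for _ in range(n))

for n in (1_000, 100_000):
    estimates = [one_pi_estimate(n) for _ in range(20)]
    print(n, round(mean(estimates), 4), "+/-", round(stdev(estimates), 4))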
300
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : """simple docstring""" def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ): '''simple docstring''' A_ : Tuple = parent A_ : int = batch_size A_ : List[str] = image_size A_ : List[Any] = patch_size A_ : Optional[Any] = num_channels A_ : List[Any] = is_training A_ : Tuple = use_labels A_ : Union[str, Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Any = num_attention_heads A_ : List[str] = intermediate_size A_ : Optional[int] = hidden_act A_ : List[str] = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Any = type_sequence_label_size A_ : List[str] = initializer_range A_ : Dict = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Optional[int] = (image_size // patch_size) ** 2 A_ : List[str] = num_patches + 1 def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Tuple = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ): '''simple docstring''' A_ : Optional[Any] = ViTMSNModel(config=snake_case ) model.to(snake_case ) model.eval() A_ : int = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ): '''simple docstring''' A_ : Dict = self.type_sequence_label_size A_ : Tuple = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Union[str, Any] = model(snake_case , 
labels=snake_case ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : Union[str, Any] = 1 A_ : int = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Optional[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : Optional[int] = config_and_inputs A_ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Tuple = ViTMSNModelTester(self ) A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(snake_case ) A_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[str] = [*signature.parameters.keys()] A_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __snake_case ( ) -> Optional[Any]: A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self :str ): '''simple 
docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' torch.manual_seed(2 ) A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case ) A_ : List[str] = self.default_image_processor A_ : int = prepare_img() A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case ) # forward pass with torch.no_grad(): A_ : Optional[int] = model(**snake_case ) # verify the logits A_ : List[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
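# The sequence length assumed throughout the tester above is just the patch
# grid plus one [CLS] token; spelled out for the tester's defaults
# (image_size=30, patch_size=2):
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = num_patches + 1                   # 226, including the [CLS] token
print(num_patches, seq_length)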
300
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : str = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''sew-d''' def __init__( self :Optional[Any] , snake_case :Optional[Any]=32 , snake_case :Optional[Any]=768 , snake_case :Tuple=12 , snake_case :Union[str, Any]=12 , snake_case :int=3_072 , snake_case :str=2 , snake_case :Optional[Any]=512 , snake_case :str=256 , snake_case :Optional[int]=True , snake_case :int=True , snake_case :Union[str, Any]=("p2c", "c2p") , snake_case :int="layer_norm" , snake_case :Any="gelu_python" , snake_case :List[Any]=0.1 , snake_case :Dict=0.1 , snake_case :Dict=0.1 , snake_case :Optional[Any]=0.0 , snake_case :Dict=0.1 , snake_case :Tuple=0.02 , snake_case :List[str]=1e-7 , snake_case :Tuple=1e-5 , snake_case :Tuple="group" , snake_case :Tuple="gelu" , snake_case :Union[str, Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case :Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case :List[str]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case :Optional[int]=False , snake_case :Dict=128 , snake_case :Dict=16 , snake_case :Any=True , snake_case :Tuple=0.05 , snake_case :Union[str, Any]=10 , snake_case :List[str]=2 , snake_case :List[Any]=0.0 , snake_case :List[Any]=10 , snake_case :int=0 , snake_case :List[Any]="mean" , snake_case :str=False , snake_case :str=False , snake_case :int=256 , snake_case :List[str]=0 , snake_case :List[Any]=1 , snake_case :Union[str, Any]=2 , **snake_case :str , ): '''simple docstring''' super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case ) A_ : Union[str, Any] = hidden_size A_ : Optional[Any] = feat_extract_norm A_ : Optional[int] = feat_extract_activation A_ : str = list(snake_case ) A_ : Optional[int] = list(snake_case ) A_ : Any = list(snake_case ) A_ : Union[str, Any] = conv_bias A_ : Tuple = num_conv_pos_embeddings A_ : Any = num_conv_pos_embedding_groups A_ : Dict = len(self.conv_dim ) A_ : int = num_hidden_layers A_ : Union[str, Any] = intermediate_size A_ : Dict = squeeze_factor A_ : Optional[int] = max_position_embeddings A_ : int = position_buckets A_ : Union[str, Any] = share_att_key A_ : Union[str, Any] = relative_attention A_ : Union[str, Any] = norm_rel_ebd A_ : List[Any] = list(snake_case ) A_ : List[Any] = hidden_act A_ : str = num_attention_heads A_ : Optional[int] = hidden_dropout A_ : List[Any] = attention_dropout A_ : Optional[int] = activation_dropout A_ : Tuple = feat_proj_dropout A_ : Union[str, Any] = final_dropout A_ : Optional[Any] = layer_norm_eps A_ : str = feature_layer_norm_eps A_ : int = initializer_range A_ : Any = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : Dict = apply_spec_augment A_ : Dict = mask_time_prob A_ : Optional[int] = mask_time_length A_ : Union[str, Any] = mask_time_min_masks A_ : int = mask_feature_prob A_ : Any = mask_feature_length A_ : List[str] = mask_feature_min_masks # ctc loss A_ : List[str] = ctc_loss_reduction A_ : str = ctc_zero_infinity # sequence classification A_ : Union[str, Any] = use_weighted_layer_sum A_ : Optional[int] = classifier_proj_size @property def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
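# The stride-product property at the end of the config above gives the overall
# downsampling ratio of the conv feature extractor; combined with the standard
# conv-length formula it shows how many frames one second of 16 kHz audio
# collapses to (kernel/stride tuples mirror the defaults above):
import functools
import operator

conv_kernel = (10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame

def output_length(num_samples: int) -> int:
    for kernel, stride in zip(conv_kernel, conv_stride):
        num_samples = (num_samples - kernel) // stride + 1
    return num_samples

print(output_length(16_000))  # 49 frames for one second of audio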
300
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = (DDPMScheduler,) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ): '''simple docstring''' A_ : Dict = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**snake_case ) return config def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case , beta_end=snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.check_over_configs(thresholding=snake_case ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Tuple = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : List[str] = scheduler_class(**snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : int = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : int = scheduler_class(**snake_case ) A_ : Tuple = len(snake_case ) A_ : List[str] = self.dummy_model() A_ : Optional[Any] = self.dummy_sample_deter A_ : List[str] = torch.manual_seed(0 ) for t in reversed(range(snake_case ) ): # 1. predict noise residual A_ : Tuple = model(snake_case , snake_case ) # 2. 
predict previous mean of sample x_t-1 A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : Optional[int] = pred_prev_sample A_ : Tuple = torch.sum(torch.abs(snake_case ) ) A_ : str = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Optional[int] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config(prediction_type="v_prediction" ) A_ : List[str] = scheduler_class(**snake_case ) A_ : int = len(snake_case ) A_ : Dict = self.dummy_model() A_ : str = self.dummy_sample_deter A_ : Any = torch.manual_seed(0 ) for t in reversed(range(snake_case ) ): # 1. predict noise residual A_ : Optional[int] = model(snake_case , snake_case ) # 2. predict previous mean of sample x_t-1 A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : List[str] = pred_prev_sample A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) ) A_ : List[str] = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config() A_ : Dict = scheduler_class(**snake_case ) A_ : Optional[int] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case ) A_ : Optional[int] = scheduler.timesteps for i, timestep in enumerate(snake_case ): if i == len(snake_case ) - 1: A_ : str = -1 else: A_ : List[str] = timesteps[i + 1] A_ : Optional[int] = scheduler.previous_timestep(snake_case ) A_ : List[str] = prev_t.item() self.assertEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Optional[Any] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config() A_ : Tuple = scheduler_class(**snake_case ) A_ : List[str] = [100, 87, 50, 51, 0] with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Any = self.scheduler_classes[0] A_ : Union[str, Any] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**snake_case ) A_ : Union[str, Any] = [100, 87, 50, 1, 0] A_ : Optional[int] = len(snake_case ) with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Union[str, Any] = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**snake_case ) A_ : Optional[int] = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=snake_case )
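# The "fixed_small" variances asserted in the test above are the DDPM posterior
# variances beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t); recomputed by
# hand from the linear beta schedule in the default config:
import torch

betas = torch.linspace(0.0001, 0.02, 1_000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def fixed_small_variance(t: int) -> float:
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return (betas[t] * (1 - alpha_bar_prev) / (1 - alphas_cumprod[t])).item()

print(fixed_small_variance(0), fixed_small_variance(487), fixed_small_variance(999))
# ~0.0, ~0.00979, ~0.02 -- matching the assertions above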
300
1
import cmath
import math


def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
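# Real power (W), reactive power (VAR) and the apparent-power magnitude (VA)
# all fall out of the same complex product; a quick follow-on with arbitrary
# example values:
s = apparent_power(100, 5, 0, 30)  # 100 V at 0 degrees, 5 A at 30 degrees
print(round(s.real, 2), round(s.imag, 2), round(abs(s), 2))  # 433.01 250.0 500.0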
300
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : int = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]: for attribute in key.split("." ): A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: A_ : Tuple = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Any = value elif weight_type == "bias": A_ : str = value else: A_ : Any = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]: A_ : Optional[Any] = [] A_ : Any = fairseq_model.state_dict() A_ : Union[str, Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ : str = None for name, value in fairseq_dict.items(): A_ : Tuple = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) A_ : Optional[Any] = True elif name.split("." )[0] == "proj": A_ : Dict = fairseq_model.proj A_ : List[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ : int = True if "*" in mapped_key: A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." 
)[-2] A_ : int = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: A_ : List[Any] = "weight_g" elif "weight_v" in name: A_ : List[Any] = "weight_v" elif "bias" in name: A_ : Dict = "bias" elif "weight" in name: A_ : List[Any] = "weight" else: A_ : Dict = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: A_ : Any = full_name.split("conv_layers." )[-1] A_ : Optional[int] = name.split("." ) A_ : Optional[Any] = int(items[0] ) A_ : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A_ : List[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A_ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) A_ : List[Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) A_ : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str: A_ , A_ : List[str] = emb.weight.shape A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) A_ : List[Any] = emb.weight.data return lin_layer def __snake_case ( _lowerCAmelCase : str ) -> Tuple: with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: A_ : int = f.readlines() A_ : Dict = [line.split(" " )[0] for line in lines] A_ : Tuple = len(_lowerCAmelCase ) A_ : Union[str, Any] = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple: A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) A_ : str = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) A_ : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ : Union[str, Any] = model[0].eval() # set weights for wav2vec2 encoder A_ : Tuple = WavaVecaModel(_lowerCAmelCase ) A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase ) A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) A_ : Optional[Any] = False # add projection layer A_ : Optional[Any] = nn.Parameter(projection_layer.weight ) A_ : int = nn.Parameter(projection_layer.bias ) A_ : str = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) A_ : Optional[int] = hf_wavavec.config.to_dict() A_ : int = tokenizer.pad_token_id A_ : List[str] = tokenizer.bos_token_id A_ : List[str] = tokenizer.eos_token_id A_ : List[str] = "speech_to_text_2" A_ : Tuple = "wav2vec2" A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, 
help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
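# Usage sketch for the conversion above: load the exported SpeechEncoderDecoderModel
# back and smoke-test greedy decoding on one second of silence. The dump directory
# name is hypothetical; substitute whatever was passed as --pytorch_dump_folder_path.
import torch
from transformers import SpeechEncoderDecoderModel, Speech2Text2Tokenizer, Wav2Vec2FeatureExtractor

dump_dir = "./wav2vec2-s2t"  # hypothetical output of the conversion script
model = SpeechEncoderDecoderModel.from_pretrained(dump_dir)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(dump_dir)
tokenizer = Speech2Text2Tokenizer.from_pretrained(dump_dir)

# one second of silence at the 16 kHz rate the feature extractor was saved with
inputs = feature_extractor(torch.zeros(16_000).numpy(), sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs.input_values, max_length=20)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))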
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : List[str] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''} _lowerCAmelCase : Any = { '''vocab_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''', }, '''emoji_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''', }, } _lowerCAmelCase : List[Any] = { '''abeja/gpt-neox-japanese-2.7b''': 2_048, } def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Union[str, Any]: with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: A_ : Tuple = json.loads(f.read() ) A_ : Any = collections.OrderedDict() A_ : Optional[Any] = collections.OrderedDict() A_ : Dict = collections.OrderedDict() with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: A_ : List[str] = f.readlines() A_ : int = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(_lowerCAmelCase ): A_ : Tuple = b A_ : Optional[int] = idx for wd in b: A_ : Optional[Any] = idx return vocab, raw_vocab, ids_to_tokens, emoji class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self :List[Any] , snake_case :List[Any] , snake_case :List[str] , snake_case :List[Any]="<|endoftext|>" , snake_case :Dict="<|endoftext|>" , snake_case :List[Any]="<|startoftext|>" , snake_case :int="<|endoftext|>" , snake_case :Dict=False , **snake_case :List[Any] , ): '''simple docstring''' super().__init__( unk_token=snake_case , pad_token=snake_case , bos_token=snake_case , eos_token=snake_case , do_clean_text=snake_case , **snake_case , ) if not os.path.isfile(snake_case ): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(snake_case ): raise ValueError( f"Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google" " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) A_ : List[str] = do_clean_text A_ , A_ , A_ , A_ : List[Any] = load_vocab_and_emoji(snake_case , snake_case ) A_ : Any = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return len(self.raw_vocab ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return dict(self.raw_vocab , **self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Tuple ): '''simple docstring''' return self.subword_tokenizer.tokenize(snake_case , clean=self.do_clean_text ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] ): '''simple docstring''' return self.vocab.get(snake_case , self.vocab.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Any ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int ): '''simple docstring''' A_ : Tuple = "".join(snake_case ).strip() return out_string def SCREAMING_SNAKE_CASE ( self :str , snake_case :"Conversation" ): '''simple docstring''' A_ : List[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(snake_case , add_special_tokens=snake_case ) + [self.eos_token_id] ) if len(snake_case ) > self.model_max_length: A_ : Any = input_ids[-self.model_max_length :] return input_ids def SCREAMING_SNAKE_CASE ( self :Any , snake_case :str , snake_case :Optional[str] = None ): '''simple docstring''' A_ : int = 0 if os.path.isdir(snake_case ): A_ : Optional[int] = os.path.join( snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) A_ : Dict = os.path.join( snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: A_ : Any = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) A_ : Optional[Any] = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(snake_case , "w" , encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
) A_ : Any = token_index writer.write(",".join(snake_case ) + "\n" ) index += 1 with open(snake_case , "w" , encoding="utf-8" ) as writer: json.dump(self.emoji , snake_case ) return vocab_file, emoji_file class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :List[str] , snake_case :str , snake_case :List[str] , snake_case :List[str] ): '''simple docstring''' A_ : int = vocab # same as swe A_ : int = ids_to_tokens # same as bpe A_ : Union[str, Any] = emoji A_ : Optional[int] = np.max([len(snake_case ) for w in self.vocab.keys()] ) A_ : Union[str, Any] = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) A_ : Union[str, Any] = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) A_ : Optional[Any] = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) A_ : List[Any] = re.compile( R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) A_ : str = re.compile( R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) A_ : str = re.compile( R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) A_ : List[str] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" A_ : Union[str, Any] = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" A_ : List[str] = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self :Tuple ): '''simple docstring''' return len(self.ids_to_tokens ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :int ): '''simple docstring''' A_ : Union[str, Any] = self.content_repattera.sub("<URL>" , snake_case ) A_ : List[Any] = self.content_repattera.sub("<EMAIL>" , snake_case ) A_ : List[str] = self.content_repattera.sub("<TEL>" , snake_case ) A_ : List[str] = self.content_repattera.sub("<DATE>" , snake_case ) A_ : Dict = self.content_repattera.sub("<DATE>" , snake_case ) A_ : str = self.content_repattera.sub("<PRICE>" , snake_case ) A_ : Tuple = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: A_ : Optional[Any] = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" ) return content def SCREAMING_SNAKE_CASE ( self :Any , snake_case :int , snake_case :int=False ): '''simple docstring''' A_ : List[str] = text.replace(" " , "<SP>" ) A_ : Optional[int] = text.replace(" " , "<SP>" ) A_ : Optional[int] = text.replace("\r\n" , "<BR>" ) A_ : Dict = text.replace("\n" , "<BR>" ) A_ : List[str] = text.replace("\r" , "<BR>" ) A_ : List[Any] = text.replace("\t" , "<TAB>" ) A_ : List[str] = text.replace("—" , "ー" ) A_ : Union[str, Any] = text.replace("−" , "ー" ) for k, v in self.emoji["emoji"].items(): if k in text: A_ : int = text.replace(snake_case , snake_case ) if clean: A_ : Optional[Any] = self.clean_text(snake_case ) def check_simbol(snake_case :Dict ): A_ : Optional[int] = x.encode() if len(snake_case ) == 1 and len(snake_case ) == 2: A_ : Optional[Any] = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xc2_a1 and c <= 0xc2_bf) or (c >= 0xc7_80 and c <= 0xc7_83) or (c >= 0xca_b9 and c <= 0xcb_bf) or (c >= 0xcc_80 and c <= 0xcd_a2) ): return True return False def checkuae(snake_case :Dict ): A_ : Dict = x.encode() 
if len(snake_case ) == 1 and len(snake_case ) == 3: A_ : List[Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xe2_80_80 and c <= 0xe2_b0_7f: return True return False A_ : Dict = 0 A_ : List[str] = [] while pos < len(snake_case ): A_ : str = min(len(snake_case ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3 A_ : Tuple = [] # (token_id, token, pos) for e in range(snake_case , snake_case , -1 ): A_ : Any = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(snake_case ) > 2: A_ : Optional[int] = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(snake_case ) > 0: # the smallest token_id is adopted A_ , A_ , A_ : Any = sorted(snake_case , key=lambda snake_case : x[0] )[0] result.append(snake_case ) A_ : Optional[Any] = e else: A_ : Union[str, Any] = pos + 1 A_ : List[Any] = text[pos:end] if check_simbol(snake_case ): result.append("<KIGOU>" ) elif checkuae(snake_case ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) A_ : Dict = end return result def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Union[str, Any] , snake_case :Dict="\n" ): '''simple docstring''' A_ : Tuple = [] A_ : Optional[int] = [] A_ : Union[str, Any] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(snake_case ) > 0: words.append(bytearray(snake_case ).decode("utf-8" , errors="replace" ) ) A_ : str = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(snake_case ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(snake_case ) if len(snake_case ) > 0: words.append(bytearray(snake_case ).decode("utf-8" , errors="replace" ) ) A_ : Dict = "".join(snake_case ) return text
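# Round-trip sketch for the tokenizer above (needs network access to the Hub).
# Because unknown characters fall back to <|byte..|> tokens and emoji have their
# own entries, encode followed by decode should reproduce arbitrary input text.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tok.encode("吾輩は猫である 😀")
print(ids)
print(tok.decode(ids))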
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : """simple docstring""" def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ): '''simple docstring''' A_ : str = parent A_ : str = batch_size A_ : str = seq_length A_ : Any = is_training A_ : Any = use_input_mask A_ : str = use_token_type_ids A_ : Tuple = use_labels A_ : Optional[Any] = vocab_size A_ : Dict = hidden_size A_ : str = num_hidden_layers A_ : Dict = num_attention_heads A_ : str = intermediate_size A_ : int = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : List[Any] = type_vocab_size A_ : Any = type_sequence_label_size A_ : Dict = initializer_range A_ : Any = num_labels A_ : Optional[int] = num_choices A_ : Optional[Any] = scope A_ : Any = range_bbox def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ : str = bbox[i, j, 3] A_ : Union[str, Any] = bbox[i, j, 1] A_ : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: A_ : Any = bbox[i, j, 2] A_ : Tuple = bbox[i, j, 0] A_ : int = t A_ : int = tf.convert_to_tensor(snake_case ) A_ : Any = None if self.use_input_mask: A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : str = None if self.use_token_type_ids: A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Dict = None A_ : List[Any] = None A_ : List[str] = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : str = ids_tensor([self.batch_size] , self.num_choices ) A_ : int = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ): '''simple docstring''' A_ : Any = TFLayoutLMModel(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) A_ : str = model(snake_case , snake_case , token_type_ids=snake_case ) A_ : List[Any] = model(snake_case , snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ): '''simple docstring''' A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Union[str, Any] = self.num_labels A_ : int = TFLayoutLMForSequenceClassification(config=snake_case ) A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.num_labels A_ : str = TFLayoutLMForTokenClassification(config=snake_case ) A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case ) A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Optional[Any] = { "input_ids": 
input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 10 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Tuple = TFLayoutLMModelTester(self ) A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def __snake_case ( ) -> Optional[Any]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 A_ : Union[str, Any] = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs() # forward pass A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the sequence output on [0, :3, :3] A_ : List[Any] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) ) # test the pooled output on [1, :3] A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs() # forward pass A_ : Dict = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar A_ : List[str] = outputs.loss A_ : Union[str, Any] = (2,) self.assertEqual(loss.shape , snake_case ) # test the shape of the logits A_ : Tuple = outputs.logits A_ : Tuple = (2, 2) self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) # test the shape of the logits A_ : Dict = outputs.logits A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) ) 
self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the shape of the logits A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , snake_case ) self.assertEqual(outputs.end_logits.shape , snake_case )
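# Hand-rolled forward-pass sketch mirroring the integration tests above: LayoutLM
# expects one (x0, y0, x1, y1) box per token, with coordinates already normalized
# to the 0-1000 range. The token ids and box values here are made up for illustration.
import tensorflow as tf
from transformers import TFLayoutLMModel

model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
input_ids = tf.constant([[101, 2023, 102]])  # [CLS] this [SEP]
bbox = tf.constant([[[0, 0, 0, 0], [48, 84, 156, 98], [1000, 1000, 1000, 1000]]])
outputs = model(input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # (1, 3, 768)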
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel as a TF1-style checkpoint under ckpt_dir."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument("--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
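# Sanity-check sketch for the exported checkpoint: list the variables that the
# Saver above wrote out. The path is hypothetical (whatever --tf_cache_dir and
# --model_name produced).
import tensorflow as tf

ckpt_path = "./tf_out/bert_base_uncased.ckpt"  # hypothetical output location
for name, shape in tf.train.list_variables(ckpt_path):
    print(name, shape)  # e.g. bert/embeddings/word_embeddings [30522, 768]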
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>

Assistant: '''


DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt template for `mode`, or return a literal prompt unchanged."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
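# Behaviour sketch for download_prompt, assuming this module is the
# transformers.tools.prompts helper: any string containing whitespace is treated
# as a literal prompt and returned unchanged, while other strings are resolved
# as Hub dataset repo ids (the commented call needs network access).
from transformers.tools.prompts import download_prompt

print(download_prompt("Translate <<task>> into French.", agent_name="demo"))
# -> the literal string above, since it contains whitespace
# download_prompt(None, agent_name="demo", mode="chat") would instead fetch
# chat_prompt_template.txt from huggingface-tools/default-prompts.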
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]: A_ : Tuple = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("encoder.deit.cls_token", "encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict: for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" ) A_ : List[Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : Optional[Any] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Optional[Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any: A_ : Dict = dct.pop(_lowerCAmelCase ) A_ : List[Any] = val def __snake_case ( _lowerCAmelCase : List[str] ) -> int: if "handwritten" in checkpoint_url: A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in 
checkpoint_url: A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" ) return im @torch.no_grad() def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]: A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase ) A_ : Tuple = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : Tuple = 768 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Optional[Any] = 1024 A_ : Union[str, Any] = 4096 A_ : Union[str, Any] = 24 A_ : List[Any] = 16 A_ : List[str] = 1024 else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Dict = False A_ : int = "relu" A_ : Optional[int] = 1024 A_ : Any = True A_ : List[Any] = False A_ : Optional[int] = False # load HuggingFace model A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase ) A_ : str = TrOCRForCausalLM(_lowerCAmelCase ) A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) model.eval() # load state_dict of original model, rename some keys A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"] A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Dict = state_dict.pop(_lowerCAmelCase ) if key.startswith("decoder" ) and "output_projection" not in key: A_ : List[str] = val else: A_ : Optional[Any] = val # load state dict model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size ) A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" ) A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase ) A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values # verify logits A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) A_ : Tuple = outputs.logits A_ : Union[str, Any] = torch.Size([1, 1, 50265] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Union[str, Any] = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : str = torch.tensor( [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, 
-3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : List[str] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
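# End-to-end OCR sketch using the published checkpoint this script produces
# (needs network access; the image URL is the same IAM handwriting crop used in
# prepare_img above).
import requests
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])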
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                # Heap's rule: swap with the first element on even i, with c[i] on odd i
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
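# Cross-check sketch: Heap's algorithm must enumerate exactly the n! permutations,
# just in a different order than itertools.permutations produces.
import itertools

assert sorted(heaps([1, 2, 3])) == sorted(itertools.permutations([1, 2, 3]))
assert len(heaps(list(range(5)))) == 120  # 5! distinct orderings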
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __magic_name__ : """simple docstring""" def __init__( self :Union[str, Any] , snake_case :Dict , snake_case :Optional[Any]=2 , snake_case :Tuple=3 , snake_case :Optional[int]=4 , snake_case :Union[str, Any]=2 , snake_case :Any=7 , snake_case :List[Any]=True , snake_case :Tuple=True , snake_case :Union[str, Any]=True , snake_case :Optional[Any]=True , snake_case :str=99 , snake_case :int=36 , snake_case :Tuple=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :Tuple="gelu" , snake_case :Optional[int]=0.1 , snake_case :Any=0.1 , snake_case :Optional[int]=512 , snake_case :int=16 , snake_case :Any=2 , snake_case :Union[str, Any]=0.02 , snake_case :Any=6 , snake_case :Optional[int]=6 , snake_case :int=3 , snake_case :str=4 , snake_case :Union[str, Any]=None , snake_case :int=1_000 , ): '''simple docstring''' A_ : int = parent A_ : Dict = batch_size A_ : Union[str, Any] = num_channels A_ : int = image_size A_ : int = patch_size A_ : Optional[Any] = is_training A_ : Any = use_input_mask A_ : Optional[int] = use_token_type_ids A_ : Dict = use_labels A_ : int = vocab_size A_ : List[str] = hidden_size A_ : List[str] = num_hidden_layers A_ : Any = num_attention_heads A_ : str = intermediate_size A_ : Union[str, Any] = hidden_act A_ : Dict = hidden_dropout_prob A_ : Tuple = attention_probs_dropout_prob A_ : str = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : List[str] = type_sequence_label_size A_ : str = initializer_range A_ : List[str] = coordinate_size A_ : List[Any] = shape_size A_ : Dict = num_labels A_ : Tuple = num_choices A_ : Tuple = scope A_ : Dict = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) A_ : str = text_seq_length A_ : Union[str, Any] = (image_size // patch_size) ** 2 + 1 A_ : List[Any] = self.text_seq_length + self.image_seq_length def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) A_ : Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) A_ : Union[str, Any] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ : Dict = bbox[i, j, 3] A_ : Optional[int] = bbox[i, j, 1] A_ : List[str] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: A_ : int = bbox[i, j, 2] A_ : List[str] = bbox[i, j, 0] A_ : Union[str, Any] = 
tmp_coordinate A_ : Optional[Any] = tf.constant(snake_case ) A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : List[Any] = None if self.use_input_mask: A_ : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) A_ : Optional[Any] = None if self.use_token_type_ids: A_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) A_ : Dict = None A_ : List[Any] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) A_ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE ( self :str , snake_case :Any , snake_case :Union[str, Any] , snake_case :Union[str, Any] , snake_case :str , snake_case :Union[str, Any] , snake_case :List[str] ): '''simple docstring''' A_ : int = TFLayoutLMvaModel(config=snake_case ) # text + image A_ : List[Any] = model(snake_case , pixel_values=snake_case , training=snake_case ) A_ : Dict = model( snake_case , bbox=snake_case , pixel_values=snake_case , attention_mask=snake_case , token_type_ids=snake_case , training=snake_case , ) A_ : Tuple = model(snake_case , bbox=snake_case , pixel_values=snake_case , training=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only A_ : List[str] = model(snake_case , training=snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only A_ : Union[str, Any] = model({"pixel_values": pixel_values} , training=snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[int] , snake_case :str , snake_case :Tuple , snake_case :int , snake_case :List[Any] , snake_case :int , snake_case :Tuple ): '''simple docstring''' A_ : Optional[Any] = self.num_labels A_ : Union[str, Any] = TFLayoutLMvaForSequenceClassification(config=snake_case ) A_ : Tuple = model( snake_case , bbox=snake_case , pixel_values=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , training=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Any , snake_case :int , snake_case :Optional[Any] , snake_case :List[Any] , snake_case :List[Any] , snake_case :List[Any] , snake_case :str ): '''simple docstring''' A_ : int = self.num_labels A_ : Optional[int] = TFLayoutLMvaForTokenClassification(config=snake_case ) A_ : Union[str, Any] = model( snake_case , bbox=snake_case , pixel_values=snake_case , 
attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , training=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Union[str, Any] , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :int , snake_case :Tuple , snake_case :Tuple ): '''simple docstring''' A_ : Optional[int] = 2 A_ : List[Any] = TFLayoutLMvaForQuestionAnswering(config=snake_case ) A_ : int = model( snake_case , bbox=snake_case , pixel_values=snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , training=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Tuple = self.prepare_config_and_inputs() ((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) : List[Any] = config_and_inputs A_ : Union[str, Any] = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) __UpperCamelCase = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :str , snake_case :Optional[Any] , snake_case :str , snake_case :Dict , snake_case :int , snake_case :List[Any] ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Optional[Any] , snake_case :Optional[Any] , snake_case :Any=False ): '''simple docstring''' A_ : Optional[Any] = copy.deepcopy(snake_case ) if model_class in get_values(snake_case ): A_ : Optional[Any] = { k: tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(snake_case , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(snake_case ): A_ : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case ): A_ : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) A_ : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case ): A_ : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(snake_case ): A_ : Any = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Tuple = TFLayoutLMvaModelTester(self ) A_ : Optional[int] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(snake_case ) if getattr(snake_case , "hf_compute_loss" , snake_case ): # The number of elements in the loss should be the same as the number of elements in the label A_ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , snake_case , return_labels=snake_case ) A_ : int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=snake_case )[0] ] A_ : str = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs A_ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , snake_case , return_labels=snake_case ) A_ : Any = prepared_for_class.pop("input_ids" ) A_ : List[Any] = model(snake_case , **snake_case )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions A_ : Tuple = self._prepare_for_class(inputs_dict.copy() , snake_case , return_labels=snake_case ) A_ : Union[str, Any] = prepared_for_class.pop("input_ids" ) if "labels" in prepared_for_class: A_ : Any = prepared_for_class["labels"].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: A_ : Optional[Any] = -100 A_ : int = tf.convert_to_tensor(snake_case ) A_ : int = model(snake_case , **snake_case )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict A_ : Tuple = self._prepare_for_class(inputs_dict.copy() , snake_case , return_labels=snake_case ) A_ : int = model(snake_case )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple A_ : Dict = self._prepare_for_class(inputs_dict.copy() , snake_case , return_labels=snake_case ) # Get keys that were added with the _prepare_for_class function A_ : Any = prepared_for_class.keys() - inputs_dict.keys() A_ : Optional[Any] = inspect.signature(model.call ).parameters A_ : Optional[int] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple A_ : Tuple = {0: "input_ids"} for label_key in label_keys: A_ : Optional[Any] = signature_names.index(snake_case ) A_ : Tuple = label_key A_ : List[str] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple A_ : Optional[int] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: A_ : Optional[int] = prepared_for_class[value] A_ : Union[str, Any] = tuple(snake_case ) # Send to model A_ : int = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : List[Any] = type 
self.model_tester.create_and_check_model(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : int = TFLayoutLMvaModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __snake_case ( ) -> Optional[Any]: A_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=snake_case ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : List[str] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ) A_ : Optional[Any] = self.default_image_processor A_ : str = prepare_img() A_ : Tuple = image_processor(images=snake_case , return_tensors="tf" ).pixel_values A_ : List[str] = tf.constant([[1, 2]] ) A_ : Dict = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass A_ : List[str] = model(input_ids=snake_case , bbox=snake_case , pixel_values=snake_case , training=snake_case ) # verify the logits A_ : Tuple = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , snake_case ) A_ : str = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ) )
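# Multimodal forward sketch following the integration test above: LayoutLMv3
# concatenates the text tokens with 14x14 image patches plus one patch-CLS slot,
# so two dummy text tokens give a sequence length of 2 + 196 + 1 = 199. Needs
# network access for the checkpoint; the blank image is a stand-in for a page.
import tensorflow as tf
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, TFLayoutLMv3Model

model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)

image = Image.new("RGB", (224, 224))
pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
input_ids = tf.constant([[1, 2]])
bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # (1, 199, 768)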
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : List[Any] = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase : Any = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = RobertaTokenizer def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ): '''simple docstring''' super().__init__( snake_case , snake_case 
, tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , ) A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**snake_case ) A_ : Optional[int] = add_prefix_space A_ : Optional[int] = "post_processor" A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case ) if tokenizer_component_instance: A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class` if "sep" in state: A_ : List[Any] = tuple(state["sep"] ) if "cls" in state: A_ : Optional[Any] = tuple(state["cls"] ) A_ : Tuple = False if state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : List[Any] = add_prefix_space A_ : Optional[int] = True if state.get("trim_offsets" , snake_case ) != trim_offsets: A_ : List[str] = trim_offsets A_ : Any = True if changes_to_apply: A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) ) A_ : Any = component_class(**snake_case ) setattr(self.backend_tokenizer , snake_case , snake_case ) @property def mask_token ( self :List[Any] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def mask_token ( self :Any , snake_case :Dict ): '''simple docstring''' A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value A_ : Any = value def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs."
) return super()._encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ): '''simple docstring''' A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ): '''simple docstring''' A_ : Any = [self.sep_token_id] A_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
300
1
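# A minimal, dependency-free sketch of the special-token layout the RoBERTa
# tokenizer sample above implements in build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences. The ids bos=0 and eos=2 are assumed
# here purely for illustration; real ids come from the vocabulary files.


def roberta_inputs_sketch(token_ids_a, token_ids_b=None):
    bos, eos = 0, 2  # assumed <s>/</s> ids, illustrative only
    output = [bos] + token_ids_a + [eos]
    if token_ids_b is None:
        return output
    # RoBERTa separates a sequence pair with a doubled </s></s>.
    return output + [eos] + token_ids_b + [eos]


def roberta_token_type_ids_sketch(token_ids_a, token_ids_b=None):
    # RoBERTa does not use segment embeddings, so type ids are all zeros,
    # even for sequence pairs.
    return [0] * len(roberta_inputs_sketch(token_ids_a, token_ids_b))


assert roberta_inputs_sketch([7, 8]) == [0, 7, 8, 2]
assert roberta_inputs_sketch([7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]
assert roberta_token_type_ids_sketch([7, 8], [9]) == [0, 0, 0, 0, 0, 0, 0]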
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[int] = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys _lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
300
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCAmelCase : int = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' _lowerCAmelCase : Tuple = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' _lowerCAmelCase : int = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case ) }
300
1
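# The google_bleu metric above delegates to nltk's corpus_gleu. As a sanity
# check on what that computes, here is a self-contained sketch of
# sentence-level GLEU for a single reference, following the description in
# the metric docstring (min of clipped n-gram precision and recall); this is
# my reading of that description, not the nltk implementation itself.

from collections import Counter


def sentence_gleu_sketch(reference, hypothesis, min_len=1, max_len=4):
    def ngrams(tokens):
        counts = Counter()
        for n in range(min_len, max_len + 1):
            for i in range(len(tokens) - n + 1):
                counts[tuple(tokens[i : i + n])] += 1
        return counts

    hyp, ref = ngrams(hypothesis), ngrams(reference)
    # Matching n-grams are counted with clipping, as in BLEU.
    matches = sum(min(count, ref[gram]) for gram, count in hyp.items())
    precision = matches / max(sum(hyp.values()), 1)
    recall = matches / max(sum(ref.values()), 1)
    return min(precision, recall)


# Identical sentences score 1.0; fully disjoint ones score 0.0.
assert sentence_gleu_sketch(["a", "b", "c"], ["a", "b", "c"]) == 1.0
assert sentence_gleu_sketch(["a", "b"], ["x", "y"]) == 0.0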
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    # Brute force: for each element, scan the rest of the list for the first
    # strictly greater value. O(n^2) time.
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    # Same brute force, expressed with enumerate and slicing.
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    # Monotonic stack scanned right-to-left: O(n) time, O(n) space.
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
300
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]: A_ : Tuple = tmp_path / "cache" A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str: A_ : List[Any] = tmp_path / "cache" A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : int = features.copy() if features else default_expected_features A_ : str = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]: A_ : Dict = tmp_path / "cache" A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]: if issubclass(_lowerCAmelCase , _lowerCAmelCase ): A_ : int = parquet_path elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): A_ : Optional[int] = [parquet_path] A_ : Optional[int] = tmp_path / "cache" A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) 
for split in splits: A_ : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]: A_ : Optional[Any] = tmp_path / "cache" A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ : Union[str, Any] = ParquetDatasetReader( {"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple: A_ : Optional[Any] = tmp_path / "cache" A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : List[str] = features.copy() if features else default_expected_features A_ : Tuple = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]: if split: A_ : Any = {split: parquet_path} else: A_ : Optional[Any] = "train" A_ : str = {"train": parquet_path, "test": parquet_path} A_ : Any = tmp_path / "cache" A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict: A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) A_ : Dict = pf.read() assert dataset.data.table == output_table def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]: A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" ) A_ : int = {"image": [image_path]} A_ : Optional[Any] = Features({"image": Image()} ) A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase ) A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ 
(Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any: assert get_writer_batch_size(_lowerCAmelCase ) == expected
300
1
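# The parquet tests above exercise datasets' ParquetDatasetReader/Writer. The
# round trip they rely on can be reproduced with pyarrow alone; a minimal
# sketch under that assumption (the file name and column values here are
# arbitrary, chosen to mirror the col_1/col_2/col_3 fixtures in the tests):

import pyarrow as pa
import pyarrow.parquet as pq


def parquet_round_trip_sketch(path: str = "example.parquet") -> pa.Table:
    table = pa.table({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
    pq.write_table(table, path)  # serialise the Arrow table to a parquet file
    return pq.read_table(path)   # read it back as an Arrow table


# The reloaded table carries the same rows and schema that were written:
# parquet_round_trip_sketch().num_rows == 2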
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Euclidean distance between two vectors.
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(
    dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
    # For every vector in value_array, find its nearest neighbour in dataset
    # (by Euclidean distance) and return [vector, distance] pairs.
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Cosine similarity: dot product normalised by the vector magnitudes.
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
300
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int: with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f: A_ : Optional[int] = json.load(_lowerCAmelCase ) A_ : Union[str, Any] = {} A_ : Tuple = [] A_ : Optional[Any] = [] for key, info in class_info.items(): A_ : Tuple = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(_lowerCAmelCase ) ) A_ : Optional[Any] = thing_ids A_ : int = class_names return metadata class __magic_name__ ( unittest.TestCase ): """simple docstring""" def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ): '''simple docstring''' A_ : Tuple = parent A_ : List[str] = batch_size A_ : Optional[int] = num_channels A_ : Tuple = min_resolution A_ : List[Any] = max_resolution A_ : Union[str, Any] = do_resize A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size A_ : Tuple = do_normalize A_ : List[str] = image_mean A_ : List[Any] = image_std A_ : Union[str, Any] = class_info_file A_ : List[Any] = prepare_metadata(snake_case , snake_case ) A_ : Tuple = num_text A_ : str = repo_path # for the post_process_functions A_ : Any = 2 A_ : int = 10 A_ : Optional[int] = 10 A_ : Tuple = 3 A_ : Tuple = 4 A_ : str = num_labels A_ : int = do_reduce_labels A_ : List[Any] = ignore_index def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ): '''simple docstring''' if not batched: A_ : List[str] = image_inputs[0] if isinstance(snake_case , Image.Image ): A_ , A_ : Dict = image.size else: A_ , A_ : Tuple = image.shape[1], image.shape[2] if w < h: A_ : str = int(self.size["shortest_edge"] * h / w ) A_ : Any = self.size["shortest_edge"] elif w > h: A_ : Optional[int] = self.size["shortest_edge"] A_ : List[str] = int(self.size["shortest_edge"] * w / h ) else: A_ : List[str] = self.size["shortest_edge"] A_ : Optional[Any] = 
self.size["shortest_edge"] else: A_ : Tuple = [] for image in image_inputs: A_ , A_ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : Tuple = max(snake_case , key=lambda snake_case : item[0] )[0] A_ : Union[str, Any] = max(snake_case , key=lambda snake_case : item[1] )[1] return expected_height, expected_width def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __UpperCamelCase = image_processing_class def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Union[str, Any] = OneFormerImageProcessorTester(self ) @property def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , "image_mean" ) ) self.assertTrue(hasattr(snake_case , "image_std" ) ) self.assertTrue(hasattr(snake_case , "do_normalize" ) ) self.assertTrue(hasattr(snake_case , "do_resize" ) ) self.assertTrue(hasattr(snake_case , "size" ) ) self.assertTrue(hasattr(snake_case , "ignore_index" ) ) self.assertTrue(hasattr(snake_case , "class_info_file" ) ) self.assertTrue(hasattr(snake_case , "num_text" ) ) self.assertTrue(hasattr(snake_case , "repo_path" ) ) self.assertTrue(hasattr(snake_case , "metadata" ) ) self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , 
np.ndarray ) # Test not batched input A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Optional[Any] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Any = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ): '''simple docstring''' A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # prepare image and target A_ : Tuple = self.image_processing_tester.num_labels A_ : str = None A_ : Tuple = None A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) if with_segmentation_maps: A_ : List[str] = num_labels if is_instance_map: A_ : List[str] = list(range(snake_case ) ) * 2 A_ : int = dict(enumerate(snake_case ) ) A_ : List[str] = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": A_ : int = [Image.fromarray(snake_case ) for annotation in annotations] A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , ) return inputs def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' def common(snake_case :Dict=False , snake_case :Optional[int]=None ): A_ : Tuple = self.comm_get_image_processor_inputs( with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case ) A_ : Optional[Any] = inputs["mask_labels"] A_ : List[Any] = inputs["class_labels"] A_ : Optional[Any] = inputs["pixel_values"] A_ : int = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ): self.assertEqual(mask_label.shape[0] , 
class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text ) common() common(is_instance_map=snake_case ) common(is_instance_map=snake_case , segmentation_type="pil" ) common(is_instance_map=snake_case , segmentation_type="pil" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = np.zeros((20, 50) ) A_ : List[str] = 1 A_ : int = 1 A_ : Optional[Any] = 1 A_ : Any = binary_mask_to_rle(snake_case ) self.assertEqual(len(snake_case ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs() A_ : int = fature_extractor.post_process_semantic_segmentation(snake_case ) self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] A_ : List[Any] = fature_extractor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : str = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Tuple = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
300
1
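# The similarity_search sample above scans the dataset with a Python loop. A
# vectorised numpy sketch of the same nearest-neighbour query, shown only as
# an alternative implementation strategy with identical semantics:

import numpy as np


def similarity_search_vectorised(dataset: np.ndarray, value_array: np.ndarray) -> list:
    answer = []
    for value in value_array:
        # Euclidean distance from `value` to every dataset row at once.
        dists = np.linalg.norm(dataset - value, axis=1)
        best = int(dists.argmin())
        answer.append([dataset[best].tolist(), float(dists[best])])
    return answer


# Example: the nearest row to [0, 1] in a two-row dataset is [1, 1].
_dataset = np.array([[0.0, 0.0], [1.0, 1.0]])
assert similarity_search_vectorised(_dataset, np.array([[0.0, 1.0]]))[0][0] == [1.0, 1.0]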
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : str = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', 
'''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase : Optional[int] = { '''bert-base-uncased''': 512, '''bert-large-uncased''': 512, '''bert-base-cased''': 512, '''bert-large-cased''': 512, '''bert-base-multilingual-uncased''': 512, '''bert-base-multilingual-cased''': 512, '''bert-base-chinese''': 512, '''bert-base-german-cased''': 512, '''bert-large-uncased-whole-word-masking''': 512, '''bert-large-cased-whole-word-masking''': 512, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 512, '''bert-large-cased-whole-word-masking-finetuned-squad''': 512, '''bert-base-cased-finetuned-mrpc''': 512, '''bert-base-german-dbmdz-cased''': 512, '''bert-base-german-dbmdz-uncased''': 512, '''TurkuNLP/bert-base-finnish-cased-v1''': 512, '''TurkuNLP/bert-base-finnish-uncased-v1''': 512, '''wietsedv/bert-base-dutch-cased''': 512, } _lowerCAmelCase : int = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False}, } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" 
__UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BertTokenizer def __init__( self :int , snake_case :Optional[Any]=None , snake_case :Optional[Any]=None , snake_case :Optional[int]=True , snake_case :Optional[int]="[UNK]" , snake_case :List[Any]="[SEP]" , snake_case :List[Any]="[PAD]" , snake_case :Dict="[CLS]" , snake_case :Dict="[MASK]" , snake_case :Tuple=True , snake_case :Optional[Any]=None , **snake_case :Union[str, Any] , ): '''simple docstring''' super().__init__( snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , ) A_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , snake_case ) != do_lower_case or normalizer_state.get("strip_accents" , snake_case ) != strip_accents or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars ): A_ : Union[str, Any] = getattr(snake_case , normalizer_state.pop("type" ) ) A_ : Dict = do_lower_case A_ : List[Any] = strip_accents A_ : Optional[Any] = tokenize_chinese_chars A_ : List[Any] = normalizer_class(**snake_case ) A_ : Union[str, Any] = do_lower_case def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Any , snake_case :Union[str, Any]=None ): '''simple docstring''' A_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE ( self :str , snake_case :List[int] , snake_case :Optional[List[int]] = None ): '''simple docstring''' A_ : Dict = [self.sep_token_id] A_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :str , snake_case :Optional[str] = None ): '''simple docstring''' A_ : Any = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case )
300
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''data2vec-vision''' def __init__( self :int , snake_case :Optional[int]=768 , snake_case :Any=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :Any="gelu" , snake_case :Tuple=0.0 , snake_case :int=0.0 , snake_case :Any=0.02 , snake_case :str=1e-12 , snake_case :List[str]=224 , snake_case :Dict=16 , snake_case :int=3 , snake_case :int=False , snake_case :str=False , snake_case :List[Any]=False , snake_case :Optional[Any]=False , snake_case :Tuple=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Any=True , snake_case :Optional[Any]=[3, 5, 7, 11] , snake_case :Dict=[1, 2, 3, 6] , snake_case :int=True , snake_case :List[Any]=0.4 , snake_case :Any=256 , snake_case :Union[str, Any]=1 , snake_case :Union[str, Any]=False , snake_case :Any=255 , **snake_case :int , ): '''simple docstring''' super().__init__(**snake_case ) A_ : Dict = hidden_size A_ : Tuple = num_hidden_layers A_ : List[str] = num_attention_heads A_ : Any = intermediate_size A_ : Optional[Any] = hidden_act A_ : Any = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Optional[Any] = initializer_range A_ : List[str] = layer_norm_eps A_ : str = image_size A_ : Optional[int] = patch_size A_ : int = num_channels A_ : Optional[Any] = use_mask_token A_ : Optional[Any] = use_absolute_position_embeddings A_ : Optional[int] = use_relative_position_bias A_ : Dict = use_shared_relative_position_bias A_ : Any = layer_scale_init_value A_ : Optional[Any] = drop_path_rate A_ : Dict = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Tuple = out_indices A_ : Optional[Any] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : str = use_auxiliary_head A_ : List[Any] = auxiliary_loss_weight A_ : List[str] = auxiliary_channels A_ : Dict = auxiliary_num_convs A_ : List[str] = auxiliary_concat_input A_ : Optional[int] = semantic_loss_ignore_index class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return 1e-4
300
1
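# The BERT fast tokenizer sample above builds [CLS] A [SEP] (B [SEP]) inputs
# with segment ids 0 for the first sequence and 1 for the second. A
# dependency-free sketch of that layout; the ids 101/102 are the customary
# [CLS]/[SEP] ids for bert-base-uncased, assumed here for illustration.


def bert_inputs_sketch(token_ids_a, token_ids_b=None):
    cls_id, sep_id = 101, 102  # assumed special-token ids
    input_ids = [cls_id] + token_ids_a + [sep_id]
    token_type_ids = [0] * len(input_ids)
    if token_ids_b is not None:
        input_ids += token_ids_b + [sep_id]
        token_type_ids += [1] * (len(token_ids_b) + 1)
    return input_ids, token_type_ids


assert bert_inputs_sketch([7], [8]) == ([101, 7, 102, 8, 102], [0, 0, 0, 1, 1])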
from abc import ABC, abstractmethod from argparse import ArgumentParser class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" @staticmethod @abstractmethod def SCREAMING_SNAKE_CASE ( snake_case :ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' raise NotImplementedError()
300
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = ['''input_features''', '''attention_mask'''] def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ): '''simple docstring''' super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case ) A_ : Union[str, Any] = feature_size A_ : int = sampling_rate A_ : str = padding_value A_ : int = hop_length A_ : List[str] = win_length A_ : Any = frame_signal_scale A_ : str = preemphasis_coeff A_ : List[str] = mel_floor A_ : str = normalize_means A_ : Any = normalize_vars A_ : Optional[Any] = win_function A_ : Dict = return_attention_mask A_ : List[str] = win_length * sampling_rate // 1_000 A_ : List[str] = hop_length * sampling_rate // 1_000 A_ : List[str] = optimal_fft_length(self.sample_size ) A_ : str = (self.n_fft // 2) + 1 def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ): '''simple docstring''' if self.win_function == "hamming_window": A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case ) else: A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function ) A_ : Optional[int] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) A_ : Tuple = spectrogram( one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , ) return msfc_features.T def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ): '''simple docstring''' if self.normalize_means: A_ : int = x[:input_length].mean(axis=0 ) A_ : Any = np.subtract(snake_case , snake_case ) if self.normalize_vars: A_ : List[Any] = x[:input_length].std(axis=0 ) A_ : Optional[int] = np.divide(snake_case , snake_case ) if input_length < x.shape[0]: A_ : Optional[int] = padding_value # make sure array is in float32 A_ : Union[str, Any] = x.astype(np.floataa ) return x def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ): '''simple docstring''' A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )] def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case 
:Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) A_ : Optional[Any] = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): A_ : int = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Tuple = [raw_speech] # extract fbank features A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech] # convert into correct format for padding A_ : Union[str, Any] = BatchFeature({"input_features": features} ) A_ : str = self.pad( snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , ) # make sure list is in array format A_ : Optional[int] = padded_inputs.get("input_features" ) if isinstance(input_features[0] , snake_case ): A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features] A_ : Dict = padded_inputs.get("attention_mask" ) if attention_mask is not None: A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: A_ : Dict = ( np.array(snake_case , dtype=np.intaa ) if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) A_ : Optional[int] = self.normalize( padded_inputs["input_features"] , attention_mask=snake_case ) if return_tensors is not None: A_ : Dict = padded_inputs.convert_to_tensors(snake_case ) return padded_inputs
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int: with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f: A_ : Optional[int] = json.load(_lowerCAmelCase ) A_ : Union[str, Any] = {} A_ : Tuple = [] A_ : Optional[Any] = [] for key, info in class_info.items(): A_ : Tuple = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(_lowerCAmelCase ) ) A_ : Optional[Any] = thing_ids A_ : int = class_names return metadata class __magic_name__ ( unittest.TestCase ): """simple docstring""" def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ): '''simple docstring''' A_ : Tuple = parent A_ : List[str] = batch_size A_ : Optional[int] = num_channels A_ : Tuple = min_resolution A_ : List[Any] = max_resolution A_ : Union[str, Any] = do_resize A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size A_ : Tuple = do_normalize A_ : List[str] = image_mean A_ : List[Any] = image_std A_ : Union[str, Any] = class_info_file A_ : List[Any] = prepare_metadata(snake_case , snake_case ) A_ : Tuple = num_text A_ : str = repo_path # for the post_process_functions A_ : Any = 2 A_ : int = 10 A_ : Optional[int] = 10 A_ : Tuple = 3 A_ : Tuple = 4 A_ : str = num_labels A_ : int = do_reduce_labels A_ : List[Any] = ignore_index def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ): '''simple docstring''' if not batched: A_ : List[str] = image_inputs[0] if isinstance(snake_case , Image.Image ): A_ , A_ : Dict = image.size else: A_ , A_ : Tuple = image.shape[1], image.shape[2] if w < h: A_ : str = int(self.size["shortest_edge"] * h / w ) A_ : Any = self.size["shortest_edge"] elif w > h: A_ : Optional[int] = self.size["shortest_edge"] A_ : List[str] = int(self.size["shortest_edge"] * w / h ) else: A_ : List[str] = self.size["shortest_edge"] A_ : Optional[Any] = 
self.size["shortest_edge"] else: A_ : Tuple = [] for image in image_inputs: A_ , A_ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : Tuple = max(snake_case , key=lambda snake_case : item[0] )[0] A_ : Union[str, Any] = max(snake_case , key=lambda snake_case : item[1] )[1] return expected_height, expected_width def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __UpperCamelCase = image_processing_class def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Union[str, Any] = OneFormerImageProcessorTester(self ) @property def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , "image_mean" ) ) self.assertTrue(hasattr(snake_case , "image_std" ) ) self.assertTrue(hasattr(snake_case , "do_normalize" ) ) self.assertTrue(hasattr(snake_case , "do_resize" ) ) self.assertTrue(hasattr(snake_case , "size" ) ) self.assertTrue(hasattr(snake_case , "ignore_index" ) ) self.assertTrue(hasattr(snake_case , "class_info_file" ) ) self.assertTrue(hasattr(snake_case , "num_text" ) ) self.assertTrue(hasattr(snake_case , "repo_path" ) ) self.assertTrue(hasattr(snake_case , "metadata" ) ) self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , 
np.ndarray ) # Test not batched input A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Optional[Any] = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case ) A_ : Any = image_processor( snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ): '''simple docstring''' A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # prepare image and target A_ : Tuple = self.image_processing_tester.num_labels A_ : str = None A_ : Tuple = None A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case ) if with_segmentation_maps: A_ : List[str] = num_labels if is_instance_map: A_ : List[str] = list(range(snake_case ) ) * 2 A_ : int = dict(enumerate(snake_case ) ) A_ : List[str] = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": A_ : int = [Image.fromarray(snake_case ) for annotation in annotations] A_ : List[str] = image_processor( snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , ) return inputs def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' def common(snake_case :Dict=False , snake_case :Optional[int]=None ): A_ : Tuple = self.comm_get_image_processor_inputs( with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case ) A_ : Optional[Any] = inputs["mask_labels"] A_ : List[Any] = inputs["class_labels"] A_ : Optional[Any] = inputs["pixel_values"] A_ : int = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ): self.assertEqual(mask_label.shape[0] , 
class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text ) common() common(is_instance_map=snake_case ) common(is_instance_map=snake_case , segmentation_type="pil" ) common(is_instance_map=snake_case , segmentation_type="pil" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = np.zeros((20, 50) ) A_ : List[str] = 1 A_ : int = 1 A_ : Optional[Any] = 1 A_ : Any = binary_mask_to_rle(snake_case ) self.assertEqual(len(snake_case ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs() A_ : int = fature_extractor.post_process_semantic_segmentation(snake_case ) self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] A_ : List[Any] = fature_extractor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : str = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Tuple = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 ) self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
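# A minimal sketch of the run-length encoding exercised by the binary-mask test
# above. It assumes the flattened mask is encoded as alternating (1-indexed run
# start, run length) pairs for the foreground runs; the mask regions below are
# illustrative assumptions chosen to reproduce the asserted values
# (len(rle) == 4, rle[0] == 21, rle[1] == 45). The real helper is
# `binary_mask_to_rle` in transformers.models.oneformer.image_processing_oneformer
# and may differ in details (e.g., torch-tensor handling).
import numpy as np


def mask_to_rle(mask):
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed change points
    runs[1::2] -= runs[::2]  # convert run-end positions into run lengths
    return list(runs)


mask = np.zeros((20, 50))
mask[0, 20:] = 1  # foreground run continuing...
mask[1, :15] = 1  # ...across the row boundary: one run of length 45
mask[5, :] = 1    # a second, separate run of length 50
rle = mask_to_rle(mask)
assert len(rle) == 4 and rle[0] == 21 and rle[1] == 45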
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' super().__init__() A_ : Tuple = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal." ) A_ : List[Any] = prefix_inner_dim A_ : Union[str, Any] = prefix_hidden_dim A_ : List[str] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = ( nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = GPTaConfig( vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , ) A_ : Optional[Any] = GPTaLMHeadModel(snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ): '''simple docstring''' A_ : Any = self.transformer.transformer.wte(snake_case ) A_ : str = self.encode_prefix(snake_case ) A_ : Union[str, Any] = self.decode_prefix(snake_case ) A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A_ : int = torch.cat((dummy_token, input_ids) , dim=1 ) A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ): '''simple docstring''' return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ): '''simple docstring''' return self.encode_prefix(snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Any = torch.split(snake_case , 1 , dim=0 ) A_ : Optional[int] = [] A_ : Union[str, Any] = [] for feature in features: A_ : Tuple = 
self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature # Only support beam search for now A_ , A_ : Dict = self.generate_beam( input_embeds=snake_case , device=snake_case , eos_token_id=snake_case ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A_ : int = torch.stack(snake_case ) A_ : int = torch.stack(snake_case ) return generated_tokens, generated_seq_lengths @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ): '''simple docstring''' A_ : Optional[Any] = eos_token_id A_ : List[Any] = None A_ : List[Any] = None A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int ) A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool ) if input_embeds is not None: A_ : Any = input_embeds else: A_ : Optional[Any] = self.transformer.transformer.wte(snake_case ) for i in range(snake_case ): A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case ) A_ : str = outputs.logits A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A_ : List[str] = logits.softmax(-1 ).log() if scores is None: A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 ) A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] ) A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A_ : Union[str, Any] = next_tokens else: A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] ) A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 ) else: A_ : List[str] = -float(np.inf ) A_ : List[Any] = 0 A_ : Union[str, Any] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 A_ : Optional[Any] = scores_sum / seq_lengths[:, None] A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 ) A_ : str = next_tokens // scores_sum.shape[1] A_ : Union[str, Any] = seq_lengths[next_tokens_source] A_ : Optional[int] = next_tokens % scores_sum.shape[1] A_ : Tuple = next_tokens.unsqueeze(1 ) A_ : Tuple = tokens[next_tokens_source] A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 ) A_ : Dict = generated[next_tokens_source] A_ : Union[str, Any] = scores_sum_average * seq_lengths A_ : Optional[int] = is_stopped[next_tokens_source] A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 ) A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze() if is_stopped.all(): break A_ : int = scores / seq_lengths A_ : str = scores.argsort(descending=snake_case ) # tokens tensors are already padded to max_seq_length A_ : Dict = [tokens[i] for i in order] A_ : int = torch.stack(snake_case , dim=0 ) A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
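# A minimal, self-contained sketch of the prefix-conditioning used by the model
# above: CLIP-derived prefix embeddings are concatenated in front of the token
# embeddings, and the labels are left-padded with `prefix_length` dummy tokens
# (see `get_dummy_token`) so they stay aligned. All dimensions here are
# illustrative assumptions.
import torch

batch, prefix_length, seq_len, n_embd = 2, 10, 7, 768
prefix_embeds = torch.randn(batch, prefix_length, n_embd)
token_embeds = torch.randn(batch, seq_len, n_embd)
inputs_embeds = torch.cat((prefix_embeds, token_embeds), dim=1)

input_ids = torch.randint(0, 50_257, (batch, seq_len))
dummy = torch.zeros(batch, prefix_length, dtype=input_ids.dtype)
labels = torch.cat((dummy, input_ids), dim=1)

assert inputs_embeds.shape == (batch, prefix_length + seq_len, n_embd)
assert labels.shape == (batch, prefix_length + seq_len)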
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __magic_name__ : """simple docstring""" def __init__( self :Any , snake_case :int , ): '''simple docstring''' A_ : Tuple = parent A_ : int = 13 A_ : Any = 7 A_ : Any = True A_ : List[str] = True A_ : Optional[int] = True A_ : List[Any] = 99 A_ : int = 32 A_ : List[str] = 2 A_ : Optional[Any] = 4 A_ : Optional[int] = 37 A_ : str = "gelu" A_ : int = 0.1 A_ : Tuple = 0.1 A_ : Tuple = 512 A_ : Optional[Any] = 16 A_ : Tuple = 2 A_ : Any = 0.02 A_ : Dict = 3 A_ : List[Any] = 4 A_ : str = None def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Dict = None if self.use_input_mask: A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Optional[Any] = None A_ : int = None A_ : str = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) A_ : List[Any] = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Optional[Any] = self.prepare_config_and_inputs() A_ : Optional[Any] = True A_ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Any , snake_case :List[str] , snake_case :Any , snake_case :Optional[Any] , snake_case :Any , snake_case :Optional[int] ): '''simple docstring''' A_ : List[Any] = TFEsmModel(config=snake_case ) A_ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask} A_ : Dict = model(snake_case ) A_ : Any = [input_ids, input_mask] A_ : Optional[int] = model(snake_case ) A_ : List[str] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Optional[int] , snake_case :Dict , snake_case :List[Any] , snake_case :Optional[int] , snake_case :List[str] , 
snake_case :Optional[Any] , snake_case :int , snake_case :str , ): '''simple docstring''' A_ : Any = True A_ : str = TFEsmModel(config=snake_case ) A_ : List[str] = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } A_ : str = model(snake_case ) A_ : Optional[int] = [input_ids, input_mask] A_ : Union[str, Any] = model(snake_case , encoder_hidden_states=snake_case ) # Also check the case where encoder outputs are not passed A_ : str = model(snake_case , attention_mask=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Union[str, Any] , snake_case :Optional[int] , snake_case :Tuple , snake_case :Tuple , snake_case :Optional[Any] , snake_case :Tuple ): '''simple docstring''' A_ : int = TFEsmForMaskedLM(config=snake_case ) A_ : Optional[Any] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :List[Any] , snake_case :str , snake_case :Any , snake_case :List[str] , snake_case :Tuple , snake_case :List[Any] ): '''simple docstring''' A_ : str = self.num_labels A_ : Any = TFEsmForTokenClassification(config=snake_case ) A_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask} A_ : List[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : List[str] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Any = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Any = TFEsmModelTester(self ) A_ : str = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Any = TFEsmModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip("Protein models do not support embedding resizing." ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' pass @unittest.skip("Protein models do not support embedding resizing." ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(snake_case ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A_ : Dict = model.get_bias() assert isinstance(snake_case , snake_case ) for k, v in name.items(): assert isinstance(snake_case , tf.Variable ) else: A_ : str = model.get_output_embeddings() assert x is None A_ : Dict = model.get_bias() assert name is None @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : List[Any] = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) A_ : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ : List[str] = model(snake_case )[0] A_ : Union[str, Any] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , snake_case ) # compare the actual values for a slice. A_ : List[str] = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : List[str] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) A_ : Dict = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A_ : Optional[int] = model(snake_case )[0] # compare the actual values for a slice. A_ : List[Any] = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER) but operates on characters instead of words. Please refer to the docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system, with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
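# A hedged, dependency-free illustration of the CER formula documented above
# (CER = (S + D + I) / N): a classic dynamic-programming edit distance over
# characters. This is a sketch of the definition only, not a re-implementation
# of jiwer, which the metric above actually delegates to.
def char_error_rate(reference: str, prediction: str) -> float:
    m, n = len(reference), len(prediction)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i  # i deletions
    for j in range(n + 1):
        dp[0][j] = j  # j insertions
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,         # deletion
                dp[i][j - 1] + 1,         # insertion
                dp[i - 1][j - 1] + cost,  # substitution or match
            )
    return dp[m][n] / m  # (S + D + I) / N, with N = len(reference)


assert char_error_rate("abc", "abc") == 0.0
assert round(char_error_rate("hello", "hxllo"), 2) == 0.2  # one substitution over five chars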
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths `nums` can form a polygon: the longest
    side must be strictly shorter than the sum of the remaining sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
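# Example usage of check_polygon above; the side lengths are arbitrary
# illustrations of the polygon inequality.
assert check_polygon([6, 10, 5]) is True      # 10 < 6 + 5
assert check_polygon([3, 7, 13, 2]) is False  # 13 >= 3 + 7 + 2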
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = KandinskyVaaControlnetPipeline __UpperCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] __UpperCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] __UpperCamelCase = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] __UpperCamelCase = False @property def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return 32 @property def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' return self.time_input_dim @property def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return 100 @property def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' torch.manual_seed(0 ) A_ : Union[str, Any] = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } A_ : List[Any] = UNetaDConditionModel(**snake_case ) return model @property def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' torch.manual_seed(0 ) A_ : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : List[Any] = self.dummy_unet A_ : Union[str, Any] = self.dummy_movq A_ : List[str] = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=snake_case , set_alpha_to_one=snake_case , steps_offset=1 , prediction_type="epsilon" , thresholding=snake_case , ) A_ : Union[str, Any] = { "unet": unet, "scheduler": scheduler, 
"movq": movq, } return components def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :List[Any] , snake_case :Optional[int]=0 ): '''simple docstring''' A_ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case ) ).to(snake_case ) A_ : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case ) # create hint A_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case ) ).to(snake_case ) if str(snake_case ).startswith("mps" ): A_ : str = torch.manual_seed(snake_case ) else: A_ : Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case ) A_ : int = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Union[str, Any] = "cpu" A_ : List[str] = self.get_dummy_components() A_ : Union[str, Any] = self.pipeline_class(**snake_case ) A_ : Optional[Any] = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) A_ : Any = pipe(**self.get_dummy_inputs(snake_case ) ) A_ : Dict = output.images A_ : Union[str, Any] = pipe( **self.get_dummy_inputs(snake_case ) , return_dict=snake_case , )[0] A_ : Union[str, Any] = image[0, -3:, -3:, -1] A_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : int = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) A_ : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) A_ : Optional[int] = torch.from_numpy(np.array(snake_case ) ).float() / 255.0 A_ : List[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) A_ : List[str] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(snake_case ) A_ : Optional[int] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) A_ : Dict = pipeline.to(snake_case ) pipeline.set_progress_bar_config(disable=snake_case ) A_ : int = "A robot, 4k photo" A_ : Tuple = torch.Generator(device="cuda" ).manual_seed(0 ) A_ , A_ : List[str] = pipe_prior( snake_case , generator=snake_case , num_inference_steps=5 , negative_prompt="" , ).to_tuple() A_ : List[Any] = torch.Generator(device="cuda" ).manual_seed(0 ) A_ : Tuple = pipeline( image_embeds=snake_case , negative_image_embeds=snake_case , hint=snake_case , generator=snake_case , num_inference_steps=100 , output_type="np" , ) A_ : 
List[str] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(snake_case , snake_case )
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ): '''simple docstring''' super().__init__() self.register_modules( vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A_ : int = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' self.enable_attention_slicing(snake_case ) @torch.no_grad() def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ): '''simple docstring''' if isinstance(snake_case , snake_case ): A_ : Dict = 1 elif isinstance(snake_case , snake_case ): A_ : Optional[Any] = len(snake_case ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(snake_case )}." 
) # get prompt text embeddings A_ : int = self.tokenizer( snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A_ : Dict = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A_ , A_ , A_ : int = text_embeddings.shape A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 ) A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A_ : Dict = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A_ : List[str] if negative_prompt is None: A_ : List[str] = [""] elif type(snake_case ) is not type(snake_case ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !=" f" {type(snake_case )}." ) elif isinstance(snake_case , snake_case ): A_ : Optional[Any] = [negative_prompt] elif batch_size != len(snake_case ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: A_ : Any = negative_prompt A_ : Optional[int] = text_input_ids.shape[-1] A_ : Dict = self.tokenizer( snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , ) A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A_ : Tuple = uncond_embeddings.shape[1] A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 ) A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) A_ : List[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A_ : Tuple = torch.randn( snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device ) A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to( self.device ) else: A_ : int = torch.randn( snake_case , generator=snake_case , device=self.device , dtype=snake_case ) A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case ) else: if latents_reference.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) A_ : Tuple = latents_reference.to(self.device ) A_ : Any = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2 A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2 A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy A_ : Optional[Any] = 0 if dx < 0 else dx A_ : Optional[Any] = 0 if dy < 0 else dy A_ : List[str] = max(-dx , 0 ) A_ : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A_ : str = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ : List[str] = {} if accepts_eta: A_ : Union[str, Any] = eta for i, t in enumerate(self.progress_bar(snake_case ) ): # expand the latents if we are doing classifier free guidance A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case ) # predict the noise residual A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample # perform guidance if do_classifier_free_guidance: A_ , A_ : Dict = noise_pred.chunk(2 ) A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case , snake_case , snake_case ) A_ : List[str] = 1 / 0.18215 * latents A_ : Tuple = self.vae.decode(snake_case ).sample A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to( self.device ) A_ , A_ : List[str] = self.safety_checker( images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: A_ : List[str] = None if output_type == "pil": A_ : Optional[int] = self.numpy_to_pil(snake_case ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
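# A self-contained sketch of the classifier-free guidance step performed inside
# the denoising loop above: the unconditional and text-conditioned predictions
# travel through the UNet as one batch, are split with `.chunk(2)`, and are
# recombined with the guidance weight. Shapes and values here are illustrative.
import torch

guidance_scale = 7.5
noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] stacked along dim 0
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)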
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / R_eq = sum(1 / R_i)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: R_eq = sum(R_i)."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
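# Example usage of the helpers above (values are illustrative): series
# resistances add directly, while the parallel combination follows
# 1 / R_eq = sum(1 / R_i).
assert resistor_series([3.0, 5.0, 10.0]) == 18.0
assert abs(resistor_parallel([10.0, 10.0]) - 5.0) < 1e-9  # two equal resistors halve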
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
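At its core, the safety checker above is one primitive: thresholded cosine similarity between an image embedding and a bank of concept embeddings. A minimal standalone sketch of that check, with random tensors standing in for real CLIP outputs (shapes and thresholds are illustrative, not the model's real values):

import torch
import torch.nn as nn


def cosine_similarity_matrix(image_embeds: torch.Tensor, concept_embeds: torch.Tensor) -> torch.Tensor:
    # Row-normalize both banks; the matrix product is then pairwise cosine similarity.
    image_embeds = nn.functional.normalize(image_embeds, dim=-1)
    concept_embeds = nn.functional.normalize(concept_embeds, dim=-1)
    return image_embeds @ concept_embeds.t()


torch.manual_seed(0)
image_embeds = torch.randn(2, 8)            # 2 "images" in a toy 8-dim embedding space
concept_embeds = torch.randn(3, 8)          # 3 "concepts"
thresholds = torch.tensor([0.3, 0.5, 0.4])  # hypothetical per-concept thresholds

scores = cosine_similarity_matrix(image_embeds, concept_embeds) - thresholds
flagged = torch.any(scores > 0, dim=1)  # an image is flagged if any concept score exceeds its threshold
print(flagged)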
300
1
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _lowerCAmelCase : int = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class __magic_name__ ( unittest.TestCase ): """simple docstring""" __UpperCamelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __UpperCamelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __UpperCamelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __UpperCamelCase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :List[str] , snake_case :List[str] , snake_case :Optional[Any] ): '''simple docstring''' A_ : List[Any] = ZeroShotClassificationPipeline( model=snake_case , tokenizer=snake_case , candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Union[str, Any] , snake_case :str ): '''simple docstring''' A_ : List[str] = classifier("Who are you voting for in 2020?" , candidate_labels="politics" ) self.assertEqual(snake_case , {"sequence": ANY(snake_case ), "labels": [ANY(snake_case )], "scores": [ANY(snake_case )]} ) # No kwarg A_ : Tuple = classifier("Who are you voting for in 2020?" , ["politics"] ) self.assertEqual(snake_case , {"sequence": ANY(snake_case ), "labels": [ANY(snake_case )], "scores": [ANY(snake_case )]} ) A_ : Union[str, Any] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] ) self.assertEqual(snake_case , {"sequence": ANY(snake_case ), "labels": [ANY(snake_case )], "scores": [ANY(snake_case )]} ) A_ : Optional[Any] = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" ) self.assertEqual( snake_case , {"sequence": ANY(snake_case ), "labels": [ANY(snake_case ), ANY(snake_case )], "scores": [ANY(snake_case ), ANY(snake_case )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) A_ : int = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] ) self.assertEqual( snake_case , {"sequence": ANY(snake_case ), "labels": [ANY(snake_case ), ANY(snake_case )], "scores": [ANY(snake_case ), ANY(snake_case )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) A_ : Optional[Any] = classifier( "Who are you voting for in 2020?" 
, candidate_labels="politics" , hypothesis_template="This text is about {}" ) self.assertEqual(snake_case , {"sequence": ANY(snake_case ), "labels": [ANY(snake_case )], "scores": [ANY(snake_case )]} ) # https://github.com/huggingface/transformers/issues/13846 A_ : int = classifier(["I am happy"] , ["positive", "negative"] ) self.assertEqual( snake_case , [ {"sequence": ANY(snake_case ), "labels": [ANY(snake_case ), ANY(snake_case )], "scores": [ANY(snake_case ), ANY(snake_case )]} for i in range(1 ) ] , ) A_ : Optional[int] = classifier(["I am happy", "I am sad"] , ["positive", "negative"] ) self.assertEqual( snake_case , [ {"sequence": ANY(snake_case ), "labels": [ANY(snake_case ), ANY(snake_case )], "scores": [ANY(snake_case ), ANY(snake_case )]} for i in range(2 ) ] , ) with self.assertRaises(snake_case ): classifier("" , candidate_labels="politics" ) with self.assertRaises(snake_case ): classifier(snake_case , candidate_labels="politics" ) with self.assertRaises(snake_case ): classifier("Who are you voting for in 2020?" , candidate_labels="" ) with self.assertRaises(snake_case ): classifier("Who are you voting for in 2020?" , candidate_labels=snake_case ) with self.assertRaises(snake_case ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , ) with self.assertRaises(snake_case ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=snake_case , ) self.run_entailment_id(snake_case ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Pipeline ): '''simple docstring''' A_ : str = zero_shot_classifier.model.config A_ : Dict = config.labelaid A_ : List[Any] = zero_shot_classifier.entailment_id A_ : Tuple = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) A_ : Dict = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) A_ : Any = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) A_ : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) A_ : List[Any] = original_labelaid self.assertEqual(snake_case , zero_shot_classifier.entailment_id ) @require_torch def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Optional[int] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] ) @require_torch def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : Dict = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) A_ : List[Any] = zero_shot_classifier( "Who are you voting for in 2020?" 
, candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(snake_case ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], } , ) @require_tf def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : int = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , ) A_ : List[Any] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(snake_case ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], } , ) @slow @require_torch def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : List[str] = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" ) A_ : List[Any] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(snake_case ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], } , ) A_ : Any = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=snake_case , ) self.assertEqual( nested_simplify(snake_case ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], } , ) @slow @require_tf def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" ) A_ : List[str] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(snake_case ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], } , ) A_ : Optional[Any] = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=snake_case , ) self.assertEqual( nested_simplify(snake_case ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. 
We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], } , )
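Outside the test harness above, the zero-shot classification pipeline is a one-liner to use. A small usage sketch; the checkpoint name is illustrative and any NLI-style (MNLI) model works:

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This text is about {}.",  # must contain "{}", as the tests above assert
)
print(result["labels"][0], round(result["scores"][0], 3))  # top label and its score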
300
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]: A_ : Tuple = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("encoder.deit.cls_token", "encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict: for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" ) A_ : List[Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : Optional[Any] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Optional[Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any: A_ : Dict = dct.pop(_lowerCAmelCase ) A_ : List[Any] = val def __snake_case ( _lowerCAmelCase : List[str] ) -> int: if "handwritten" in checkpoint_url: A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in 
checkpoint_url: A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" ) return im @torch.no_grad() def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]: A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase ) A_ : Tuple = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : Tuple = 768 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Optional[Any] = 1024 A_ : Union[str, Any] = 4096 A_ : Union[str, Any] = 24 A_ : List[Any] = 16 A_ : List[str] = 1024 else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Dict = False A_ : int = "relu" A_ : Optional[int] = 1024 A_ : Any = True A_ : List[Any] = False A_ : Optional[int] = False # load HuggingFace model A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase ) A_ : str = TrOCRForCausalLM(_lowerCAmelCase ) A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) model.eval() # load state_dict of original model, rename some keys A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"] A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Dict = state_dict.pop(_lowerCAmelCase ) if key.startswith("decoder" ) and "output_projection" not in key: A_ : List[str] = val else: A_ : Optional[Any] = val # load state dict model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size ) A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" ) A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase ) A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values # verify logits A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) A_ : Tuple = outputs.logits A_ : Union[str, Any] = torch.Size([1, 1, 50265] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Union[str, Any] = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : str = torch.tensor( [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, 
-3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected" Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : List[str] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
300
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :Optional[int] , snake_case :List[Any] ): '''simple docstring''' A_ : str = data def __iter__( self :Optional[int] ): '''simple docstring''' for element in self.data: yield element def __snake_case ( _lowerCAmelCase : Any=True ) -> Optional[Any]: A_ : Union[str, Any] = Accelerator(even_batches=_lowerCAmelCase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def __snake_case ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : bool = False ) -> Optional[int]: if iterable: A_ : Tuple = DummyIterableDataset(torch.as_tensor(range(_lowerCAmelCase ) ) ) else: A_ : List[str] = TensorDataset(torch.as_tensor(range(_lowerCAmelCase ) ) ) A_ : Optional[int] = DataLoader(_lowerCAmelCase , batch_size=_lowerCAmelCase ) A_ : List[Any] = accelerator.prepare(_lowerCAmelCase ) return dl def __snake_case ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : List[int] , _lowerCAmelCase : List[int] , ) -> Optional[Any]: A_ : int = create_dataloader(accelerator=_lowerCAmelCase , dataset_size=_lowerCAmelCase , batch_size=_lowerCAmelCase ) A_ : str = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def __snake_case ( ) -> Tuple: A_ : int = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( _lowerCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( _lowerCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def __snake_case ( ) -> Optional[int]: A_ : Optional[Any] = create_accelerator(even_batches=_lowerCAmelCase ) verify_dataloader_batch_sizes( _lowerCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( _lowerCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def __snake_case ( ) -> Optional[Any]: A_ : Union[str, Any] = create_accelerator(even_batches=_lowerCAmelCase ) A_ : Optional[Any] = torch.nn.Linear(1 , 1 ) A_ : int = accelerator.prepare(_lowerCAmelCase ) A_ : Tuple = 
create_dataloader(_lowerCAmelCase , dataset_size=3 , batch_size=1 ) A_ : List[str] = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(_lowerCAmelCase ): A_ : Optional[Any] = ddp_model(batch[0].float() ) A_ : Any = output.sum() loss.backward() batch_idxs.append(_lowerCAmelCase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def __snake_case ( _lowerCAmelCase : Tuple ) -> Any: with warnings.catch_warnings(record=_lowerCAmelCase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , _lowerCAmelCase ) assert "only supported for multi-GPU" in str(w[-1].message ) def __snake_case ( ) -> List[Any]: A_ : Any = True A_ : List[str] = False A_ : Optional[int] = create_accelerator(even_batches=_lowerCAmelCase ) A_ : List[Any] = torch.nn.Linear(1 , 1 ) A_ : Any = accelerator.prepare(_lowerCAmelCase ) A_ : Optional[Any] = create_dataloader(_lowerCAmelCase , dataset_size=3 , batch_size=1 ) A_ : Dict = create_dataloader(_lowerCAmelCase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCAmelCase ): A_ : int = train_dl.batch_sampler.even_batches A_ : Any = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def __snake_case ( ) -> str: A_ : List[Any] = True A_ : Dict = False A_ : Union[str, Any] = create_accelerator(even_batches=_lowerCAmelCase ) A_ : str = torch.nn.Linear(1 , 1 ) A_ : List[str] = accelerator.prepare(_lowerCAmelCase ) create_dataloader(_lowerCAmelCase , dataset_size=3 , batch_size=1 , iterable=_lowerCAmelCase ) A_ : Union[str, Any] = create_dataloader(_lowerCAmelCase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCAmelCase ): A_ : Tuple = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def __snake_case ( ) -> Union[str, Any]: A_ : Union[str, Any] = create_accelerator() A_ : List[Any] = torch.nn.Linear(1 , 1 ) A_ : Dict = accelerator.prepare(_lowerCAmelCase ) create_dataloader(_lowerCAmelCase , dataset_size=3 , batch_size=1 , iterable=_lowerCAmelCase ) with warnings.catch_warnings(record=_lowerCAmelCase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCAmelCase ): pass assert issubclass(w[-1].category , _lowerCAmelCase ) assert "only supported for map-style datasets" in str(w[-1].message ) def __snake_case ( ) -> Tuple: A_ : Dict = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) 
test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) A_ : Any = accelerator.state.distributed_type A_ : Optional[int] = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(_lowerCAmelCase ) A_ : Dict = original_state if __name__ == "__main__": main()
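The even_batches behaviour exercised above amounts to recycling early samples so every process receives the same number of items. This is not Accelerate's actual batch sampler, just a toy sketch of the idea:

def shard_indices(dataset_size: int, num_processes: int, even_batches: bool) -> list:
    # With even_batches, wrap around and reuse early indices to pad the dataset
    # up to a multiple of num_processes; otherwise the last shard comes up short.
    indices = list(range(dataset_size))
    if even_batches and dataset_size % num_processes != 0:
        pad = num_processes - dataset_size % num_processes
        indices += indices[:pad]
    return [indices[rank::num_processes] for rank in range(num_processes)]


print(shard_indices(3, 2, even_batches=True))   # [[0, 2], [1, 0]] -> both ranks get 2 samples
print(shard_indices(3, 2, even_batches=False))  # [[0, 2], [1]]   -> ranks diverge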
300
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = 1 @register_to_config def __init__( self :Union[str, Any] , snake_case :int = 2_000 , snake_case :float = 0.15 , snake_case :float = 0.01 , snake_case :float = 1348.0 , snake_case :float = 1e-5 , snake_case :int = 1 , ): '''simple docstring''' A_ : Dict = sigma_max # setable values A_ : List[Any] = None self.set_sigmas(snake_case , snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :Optional[int] = None ): '''simple docstring''' return sample def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :float = None , snake_case :Union[str, torch.device] = None ): '''simple docstring''' A_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps A_ : Tuple = torch.linspace(1 , snake_case , snake_case , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int , snake_case :float = None , snake_case :float = None , snake_case :float = None ): '''simple docstring''' A_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min A_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max A_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(snake_case , snake_case ) A_ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) A_ : Any = torch.exp(torch.linspace(math.log(snake_case ) , math.log(snake_case ) , snake_case ) ) A_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Dict ): '''simple docstring''' return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :int , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) A_ : int = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) A_ : Optional[Any] = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda A_ : Dict = timesteps.to(self.discrete_sigmas.device ) A_ : Optional[int] = self.discrete_sigmas[timesteps].to(sample.device ) A_ : int = self.get_adjacent_sigma(snake_case , snake_case ).to(sample.device ) A_ : Union[str, Any] = torch.zeros_like(snake_case ) A_ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE 
models to ancestral sampling methods A_ : Optional[int] = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): A_ : Tuple = diffusion.unsqueeze(-1 ) A_ : Optional[Any] = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of A_ : List[Any] = randn_tensor( sample.shape , layout=sample.layout , generator=snake_case , device=sample.device , dtype=sample.dtype ) A_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? A_ : Any = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=snake_case , prev_sample_mean=snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction A_ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case ).to(sample.device ) # compute step size from the model_output, the noise, and the snr A_ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() A_ : List[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() A_ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 A_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term A_ : int = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): A_ : str = step_size.unsqueeze(-1 ) A_ : Optional[Any] = sample + step_size * model_output A_ : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , ): '''simple docstring''' A_ : Union[str, Any] = timesteps.to(original_samples.device ) A_ : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps] A_ : List[Any] = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(snake_case ) * sigmas[:, None, None, None] ) A_ : Optional[int] = noise + original_samples return noisy_samples def __len__( self :Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
300
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
300
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    # Finds a root of `func` (an expression in x) starting from the point `a`.
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find value of e
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
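The routine above leans on eval(), which is fragile (hence the noqa: S307 markers). A variant that stays inside sympy via lambdify avoids string evaluation entirely; newton_raphson_sym is a hypothetical name introduced for this sketch:

from sympy import diff, lambdify, log, sin, symbols


def newton_raphson_sym(expr, x0: float, precision: float = 1e-10) -> float:
    x_sym = symbols("x")
    f = lambdify(x_sym, expr)                     # compile the expression to a plain function
    f_prime = lambdify(x_sym, diff(expr, x_sym))  # and its derivative
    x = x0
    while abs(f(x)) >= precision:
        x -= f(x) / f_prime(x)                    # classic Newton step
    return x


x = symbols("x")
print(newton_raphson_sym(sin(x), 2))      # ~3.14159... (pi)
print(newton_raphson_sym(log(x) - 1, 2))  # ~2.71828... (e)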
300
1
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : """simple docstring""" def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ): '''simple docstring''' A_ : Tuple = parent A_ : int = batch_size A_ : List[str] = image_size A_ : List[Any] = patch_size A_ : Optional[Any] = num_channels A_ : List[Any] = is_training A_ : Tuple = use_labels A_ : Union[str, Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Any = num_attention_heads A_ : List[str] = intermediate_size A_ : Optional[int] = hidden_act A_ : List[str] = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Any = type_sequence_label_size A_ : List[str] = initializer_range A_ : Dict = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Optional[int] = (image_size // patch_size) ** 2 A_ : List[str] = num_patches + 1 def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Tuple = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ): '''simple docstring''' A_ : Optional[Any] = ViTMSNModel(config=snake_case ) model.to(snake_case ) model.eval() A_ : int = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ): '''simple docstring''' A_ : Dict = self.type_sequence_label_size A_ : Tuple = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Union[str, Any] = model(snake_case , 
labels=snake_case ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : Union[str, Any] = 1 A_ : int = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Optional[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : Optional[int] = config_and_inputs A_ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Tuple = ViTMSNModelTester(self ) A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(snake_case ) A_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[str] = [*signature.parameters.keys()] A_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __snake_case ( ) -> Optional[Any]: A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self :str ): '''simple 
docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' torch.manual_seed(2 ) A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case ) A_ : List[str] = self.default_image_processor A_ : int = prepare_img() A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case ) # forward pass with torch.no_grad(): A_ : Optional[int] = model(**snake_case ) # verify the logits A_ : List[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
300
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets _lowerCAmelCase : List[Any] = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' _lowerCAmelCase : Union[str, Any] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' _lowerCAmelCase : Optional[Any] = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :List[Any] , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' A_ : List[str] = len(references[0] ) if any(len(snake_case ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A_ : int = [[refs[i] for refs in references] for i in range(snake_case )] A_ : Optional[Any] = TER( normalized=snake_case , no_punct=snake_case , asian_support=snake_case , case_sensitive=snake_case , ) A_ : List[Any] = sb_ter.corpus_score(snake_case , snake_case ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
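For comparison with the wrapper above, sacrebleu's TER can be called directly; the only wrinkle is that sacrebleu expects references transposed into per-stream lists, which is exactly the reshaping the metric above performs. A sketch using sacrebleu's public API:

from sacrebleu.metrics import TER

ter = TER(normalized=False, no_punct=False, asian_support=False, case_sensitive=True)
hypotheses = ["does this sentence match??", "what about this sentence?"]
references = [
    ["does this sentence match", "wHaT aBoUt ThIs SeNtEnCe?"],     # reference stream 1
    ["does this sentence match!?!", "wHaT aBoUt ThIs SeNtEnCe?"],  # reference stream 2
]
result = ter.corpus_score(hypotheses, references)
# should reproduce Example 2 from the docstring above: 62.5, 5, 8.0
print(result.score, result.num_edits, result.ref_length)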
300
1
def solution(limit: int = 1000000) -> int:
    # Sieve the primes below `limit`, then accumulate Euler's totient via the
    # product formula phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
300
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0) -> list:
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")) -> float:
    # brute force over the first `points_counts` points
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")) -> float:
    # within the strip, each point only needs to be checked against its six neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
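On small inputs the divide-and-conquer implementation above is easy to sanity-check against an O(n^2) brute force:

from itertools import combinations


def closest_pair_brute_force(points) -> float:
    # Check every pair; fine for a handful of points.
    return min(
        ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2) ** 0.5
        for p, q in combinations(points, 2)
    )


points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print(closest_pair_brute_force(points))  # should agree with closest_pair_of_points(points, len(points))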
300
1
import os import sys import unittest _lowerCAmelCase : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _lowerCAmelCase : Union[str, Any] = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') _lowerCAmelCase : Any = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = get_test_to_tester_mapping(snake_case ) A_ : Dict = get_test_to_tester_mapping(snake_case ) A_ : Optional[int] = {"BertModelTest": "BertModelTester"} A_ : Dict = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Union[str, Any] = get_model_to_test_mapping(snake_case ) A_ : Union[str, Any] = get_model_to_test_mapping(snake_case ) A_ : str = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } A_ : Union[str, Any] = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Union[str, Any] = get_model_to_tester_mapping(snake_case ) A_ : Union[str, Any] = get_model_to_tester_mapping(snake_case ) A_ : List[str] = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } A_ : Union[str, Any] = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(snake_case ) , snake_case ) self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : """simple docstring""" def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ): '''simple docstring''' A_ : Tuple = parent A_ : int = batch_size A_ : List[str] = image_size A_ : List[Any] = patch_size A_ : Optional[Any] = num_channels A_ : List[Any] = is_training A_ : Tuple = use_labels A_ : Union[str, Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Any = num_attention_heads A_ : List[str] = intermediate_size A_ : Optional[int] = hidden_act A_ : List[str] = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Any = type_sequence_label_size A_ : List[str] = initializer_range A_ : Dict = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Optional[int] = (image_size // patch_size) ** 2 A_ : List[str] = num_patches + 1 def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Tuple = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Dict = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ): '''simple docstring''' A_ : Optional[Any] = ViTMSNModel(config=snake_case ) model.to(snake_case ) model.eval() A_ : int = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ): '''simple docstring''' A_ : Dict = self.type_sequence_label_size A_ : Tuple = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Union[str, Any] = model(snake_case , 
labels=snake_case ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : Union[str, Any] = 1 A_ : int = ViTMSNForImageClassification(snake_case ) model.to(snake_case ) model.eval() A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Optional[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : Optional[int] = config_and_inputs A_ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCamelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Tuple = ViTMSNModelTester(self ) A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(snake_case ) A_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[str] = [*signature.parameters.keys()] A_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __snake_case ( ) -> Optional[Any]: A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self :str ): '''simple 
docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' torch.manual_seed(2 ) A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case ) A_ : List[str] = self.default_image_processor A_ : int = prepare_img() A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case ) # forward pass with torch.no_grad(): A_ : Optional[int] = model(**snake_case ) # verify the logits A_ : List[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
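For reference, a standalone sketch of the inference path the integration test above exercises, classifying one image with the public facebook/vit-msn-small checkpoint; the image path is a placeholder.

import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")

image = Image.open("path/to/image.png")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# MSN is self-supervised, so this checkpoint's classification head is not
# fine-tuned; the predicted label is only illustrative.
print(model.config.id2label[logits.argmax(-1).item()])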
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder _lowerCAmelCase : List[Any] = '''base_with_context''' def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : int ) -> Optional[int]: A_ : List[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) ) A_ : int = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): A_ : List[Any] = weights[f"layers_{lyr_num}"] A_ : str = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) A_ : List[str] = ly_weight["attention"] A_ : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) A_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) A_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) A_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) A_ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) A_ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) A_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) A_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) A_ : int = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ) -> Optional[int]: A_ : List[Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) ) A_ : Optional[int] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): A_ : Tuple = weights[f"layers_{lyr_num}"] A_ : Dict = ly_weight["attention"] A_ : int = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) A_ : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) A_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) A_ : Any = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) A_ : Any = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) A_ : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) A_ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) A_ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) A_ : int = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) A_ : Dict = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ) -> Any: A_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) ) A_ : List[str] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) ) A_ : str = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCAmelCase ) A_ : List[str] = nn.Parameter( torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) ) 
for lyr_num, lyr in enumerate(model.decoders ): A_ : Union[str, Any] = weights[f"layers_{lyr_num}"] A_ : List[Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) ) A_ : int = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) ) A_ : Optional[int] = ly_weight["self_attention"] A_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) A_ : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) A_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) A_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) A_ : Dict = ly_weight["MultiHeadDotProductAttention_0"] A_ : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) A_ : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) A_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) A_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) A_ : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) ) A_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) A_ : Any = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) ) A_ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) A_ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) A_ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) A_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) ) A_ : Any = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) ) return model def __snake_case ( _lowerCAmelCase : Any ) -> Union[str, Any]: A_ : Optional[int] = checkpoints.load_tax_checkpoint(args.checkpoint_path ) A_ : List[Any] = jnp.tree_util.tree_map(onp.array , _lowerCAmelCase ) A_ : Tuple = [ "from __gin__ import dynamic_registration", "from music_spectrogram_diffusion.models.diffusion import diffusion_utils", "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0", "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()", ] A_ : Any = os.path.join(args.checkpoint_path , ".." 
, "config.gin" ) A_ : Optional[int] = inference.parse_training_gin_file(_lowerCAmelCase , _lowerCAmelCase ) A_ : Tuple = inference.InferenceModel(args.checkpoint_path , _lowerCAmelCase ) A_ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" ) A_ : Optional[int] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) A_ : int = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) A_ : int = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) A_ : List[Any] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , _lowerCAmelCase ) A_ : str = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , _lowerCAmelCase ) A_ : Optional[int] = load_decoder(ta_checkpoint["target"]["decoder"] , _lowerCAmelCase ) A_ : Optional[Any] = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" ) A_ : Tuple = SpectrogramDiffusionPipeline( notes_encoder=_lowerCAmelCase , continuous_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase , scheduler=_lowerCAmelCase , melgan=_lowerCAmelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument( '''--checkpoint_path''', default=F'''{MODEL}/checkpoint_500000''', type=str, required=False, help='''Path to the original jax model checkpoint.''', ) _lowerCAmelCase : List[str] = parser.parse_args() main(args)
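The loaders above copy every Flax/T5X dense kernel into torch with a trailing `.T`. That transpose is the crux of the conversion: Flax stores `Dense` kernels as `(in_features, out_features)` while `torch.nn.Linear` keeps `(out_features, in_features)`. A minimal, self-contained illustration (shapes are arbitrary):

import numpy as np
import torch
import torch.nn as nn

flax_kernel = np.random.randn(16, 32).astype(np.float32)  # Flax layout: (in, out)
linear = nn.Linear(16, 32, bias=False)                     # torch layout: (out, in)
linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))
assert linear.weight.shape == (32, 16)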
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = (DDPMScheduler,) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ): '''simple docstring''' A_ : Dict = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**snake_case ) return config def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case , beta_end=snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.check_over_configs(thresholding=snake_case ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Tuple = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : List[str] = scheduler_class(**snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : int = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : int = scheduler_class(**snake_case ) A_ : Tuple = len(snake_case ) A_ : List[str] = self.dummy_model() A_ : Optional[Any] = self.dummy_sample_deter A_ : List[str] = torch.manual_seed(0 ) for t in reversed(range(snake_case ) ): # 1. predict noise residual A_ : Tuple = model(snake_case , snake_case ) # 2. 
predict previous mean of sample x_t-1 A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : Optional[int] = pred_prev_sample A_ : Tuple = torch.sum(torch.abs(snake_case ) ) A_ : str = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Optional[int] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config(prediction_type="v_prediction" ) A_ : List[str] = scheduler_class(**snake_case ) A_ : int = len(snake_case ) A_ : Dict = self.dummy_model() A_ : str = self.dummy_sample_deter A_ : Any = torch.manual_seed(0 ) for t in reversed(range(snake_case ) ): # 1. predict noise residual A_ : Optional[int] = model(snake_case , snake_case ) # 2. predict previous mean of sample x_t-1 A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : List[str] = pred_prev_sample A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) ) A_ : List[str] = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config() A_ : Dict = scheduler_class(**snake_case ) A_ : Optional[int] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case ) A_ : Optional[int] = scheduler.timesteps for i, timestep in enumerate(snake_case ): if i == len(snake_case ) - 1: A_ : str = -1 else: A_ : List[str] = timesteps[i + 1] A_ : Optional[int] = scheduler.previous_timestep(snake_case ) A_ : List[str] = prev_t.item() self.assertEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Optional[Any] = self.scheduler_classes[0] A_ : int = self.get_scheduler_config() A_ : Tuple = scheduler_class(**snake_case ) A_ : List[str] = [100, 87, 50, 51, 0] with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Any = self.scheduler_classes[0] A_ : Union[str, Any] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**snake_case ) A_ : Union[str, Any] = [100, 87, 50, 1, 0] A_ : Optional[int] = len(snake_case ) with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Union[str, Any] = self.scheduler_classes[0] A_ : Optional[Any] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**snake_case ) A_ : Optional[int] = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=snake_case )
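A compact sketch of the sampling loop those tests step through by hand, with a stand-in for the trained model (pure noise here, purely to show the scheduler's control flow):

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a trained UNet's noise prediction
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample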
from __future__ import annotations _lowerCAmelCase : Dict = tuple[int, int, int] _lowerCAmelCase : Optional[Any] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase _lowerCAmelCase : Union[str, Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' # -------------------------- default selection -------------------------- # rotors -------------------------- _lowerCAmelCase : List[Any] = '''EGZWVONAHDCLFQMSIPJBYUKXTR''' _lowerCAmelCase : Any = '''FOBHMDKEXQNRAULPGSJVTYICZW''' _lowerCAmelCase : Union[str, Any] = '''ZJXESIUQLHAVRMDOYGTNFWPBKC''' # reflector -------------------------- _lowerCAmelCase : List[Any] = { '''A''': '''N''', '''N''': '''A''', '''B''': '''O''', '''O''': '''B''', '''C''': '''P''', '''P''': '''C''', '''D''': '''Q''', '''Q''': '''D''', '''E''': '''R''', '''R''': '''E''', '''F''': '''S''', '''S''': '''F''', '''G''': '''T''', '''T''': '''G''', '''H''': '''U''', '''U''': '''H''', '''I''': '''V''', '''V''': '''I''', '''J''': '''W''', '''W''': '''J''', '''K''': '''X''', '''X''': '''K''', '''L''': '''Y''', '''Y''': '''L''', '''M''': '''Z''', '''Z''': '''M''', } # -------------------------- extra rotors -------------------------- _lowerCAmelCase : List[Any] = '''RMDJXFUWGISLHVTCQNKYPBEZOA''' _lowerCAmelCase : Union[str, Any] = '''SGLCPQWZHKXAREONTFBVIYJUDM''' _lowerCAmelCase : List[Any] = '''HVSICLTYKQUBXDWAJZOMFGPREN''' _lowerCAmelCase : Optional[Any] = '''RZWQHFMVDBKICJLNTUXAGYPSOE''' _lowerCAmelCase : List[Any] = '''LFKIJODBEGAMQPXVUHYSTCZRWN''' _lowerCAmelCase : Dict = '''KOAEGVDHXPQZMLFTYWJNBRCIUS''' def __snake_case ( _lowerCAmelCase : RotorPositionT , _lowerCAmelCase : RotorSelectionT , _lowerCAmelCase : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: # Checks if there are 3 unique rotors if (unique_rotsel := len(set(_lowerCAmelCase ) )) < 3: A_ : List[Any] = f"Please use 3 unique rotors (not {unique_rotsel})" raise Exception(_lowerCAmelCase ) # Checks if rotor positions are valid A_ , A_ , A_ : List[str] = rotpos if not 0 < rotorposa <= len(_lowerCAmelCase ): A_ : str = f"First rotor position is not within range of 1..26 ({rotorposa}" raise ValueError(_lowerCAmelCase ) if not 0 < rotorposa <= len(_lowerCAmelCase ): A_ : str = f"Second rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_lowerCAmelCase ) if not 0 < rotorposa <= len(_lowerCAmelCase ): A_ : str = f"Third rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(_lowerCAmelCase ) # Validates string and returns dict A_ : Optional[Any] = _plugboard(_lowerCAmelCase ) return rotpos, rotsel, pbdict def __snake_case ( _lowerCAmelCase : str ) -> dict[str, str]: # tests the input string if it # a) is type string # b) has even length (so pairs can be made) if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): A_ : Union[str, Any] = f"Plugboard setting isn't type string ({type(_lowerCAmelCase )})" raise TypeError(_lowerCAmelCase ) elif len(_lowerCAmelCase ) % 2 != 0: A_ : Union[str, Any] = f"Odd number of symbols ({len(_lowerCAmelCase )})" raise Exception(_lowerCAmelCase ) elif pbstring == "": return {} pbstring.replace(" " , "" ) # Checks if all characters are unique A_ : Dict = set() for i in pbstring: if i not in abc: A_ : Union[str, Any] = f"'{i}' not in list of symbols" raise Exception(_lowerCAmelCase ) elif i in tmppbl: A_ : str = f"Duplicate symbol ({i})" raise Exception(_lowerCAmelCase ) else: tmppbl.add(_lowerCAmelCase ) del tmppbl # Created the dictionary A_ : str = {} for j in range(0 , len(_lowerCAmelCase ) - 1 , 2 ): 
A_ : Optional[Any] = pbstring[j + 1] A_ : Dict = pbstring[j] return pb def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : RotorPositionT , _lowerCAmelCase : RotorSelectionT = (rotora, rotora, rotora) , _lowerCAmelCase : str = "" , ) -> str: A_ : str = text.upper() A_ , A_ , A_ : int = _validator( _lowerCAmelCase , _lowerCAmelCase , plugb.upper() ) A_ , A_ , A_ : int = rotor_position A_ , A_ , A_ : str = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 A_ : str = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: A_ : Any = plugboard[symbol] # rotor ra -------------------------- A_ : List[Any] = abc.index(_lowerCAmelCase ) + rotorposa A_ : List[str] = rotora[index % len(_lowerCAmelCase )] # rotor rb -------------------------- A_ : List[Any] = abc.index(_lowerCAmelCase ) + rotorposa A_ : Optional[int] = rotora[index % len(_lowerCAmelCase )] # rotor rc -------------------------- A_ : Any = abc.index(_lowerCAmelCase ) + rotorposa A_ : Tuple = rotora[index % len(_lowerCAmelCase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher A_ : int = reflector[symbol] # 2nd rotors A_ : Tuple = abc[rotora.index(_lowerCAmelCase ) - rotorposa] A_ : Union[str, Any] = abc[rotora.index(_lowerCAmelCase ) - rotorposa] A_ : Optional[Any] = abc[rotora.index(_lowerCAmelCase ) - rotorposa] # 2nd plugboard if symbol in plugboard: A_ : Any = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_lowerCAmelCase ): A_ : List[Any] = 0 rotorposa += 1 if rotorposa >= len(_lowerCAmelCase ): A_ : Union[str, Any] = 0 rotorposa += 1 if rotorposa >= len(_lowerCAmelCase ): A_ : str = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_lowerCAmelCase ) return "".join(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = '''This is my Python script that emulates the Enigma machine from WWII.''' _lowerCAmelCase : Tuple = (1, 1, 1) _lowerCAmelCase : List[str] = '''pictures''' _lowerCAmelCase : str = (rotora, rotora, rotora) _lowerCAmelCase : int = enigma(message, rotor_pos, rotor_sel, pb) print('''Encrypted message:''', en) print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
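Because the reflector makes each keystroke mapping an involution, deciphering is simply enciphering again with identical settings; using the demo's own variables, that is a two-line check:

cipher = enigma(message, rotor_pos, rotor_sel, pb)
assert enigma(cipher, rotor_pos, rotor_sel, pb) == message.upper()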
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : int = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]: for attribute in key.split("." ): A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: A_ : Tuple = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": A_ : Optional[int] = value elif weight_type == "weight_g": A_ : Optional[int] = value elif weight_type == "weight_v": A_ : Any = value elif weight_type == "bias": A_ : str = value else: A_ : Any = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]: A_ : Optional[Any] = [] A_ : Any = fairseq_model.state_dict() A_ : Union[str, Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight A_ : str = None for name, value in fairseq_dict.items(): A_ : Tuple = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) A_ : Optional[Any] = True elif name.split("." )[0] == "proj": A_ : Dict = fairseq_model.proj A_ : List[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ : int = True if "*" in mapped_key: A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." 
)[-2] A_ : int = mapped_key.replace("*" , _lowerCAmelCase ) if "weight_g" in name: A_ : List[Any] = "weight_g" elif "weight_v" in name: A_ : List[Any] = "weight_v" elif "bias" in name: A_ : Dict = "bias" elif "weight" in name: A_ : List[Any] = "weight" else: A_ : Dict = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str: A_ : Any = full_name.split("conv_layers." )[-1] A_ : Optional[int] = name.split("." ) A_ : Optional[Any] = int(items[0] ) A_ : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A_ : List[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A_ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) A_ : List[Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) A_ : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_lowerCAmelCase ) def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str: A_ , A_ : List[str] = emb.weight.shape A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) A_ : List[Any] = emb.weight.data return lin_layer def __snake_case ( _lowerCAmelCase : str ) -> Tuple: with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: A_ : int = f.readlines() A_ : Dict = [line.split(" " )[0] for line in lines] A_ : Tuple = len(_lowerCAmelCase ) A_ : Union[str, Any] = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple: A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) A_ : str = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) A_ : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ : Union[str, Any] = model[0].eval() # set weights for wav2vec2 encoder A_ : Tuple = WavaVecaModel(_lowerCAmelCase ) A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase ) A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("embed_out" ) A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) A_ : Optional[Any] = False # add projection layer A_ : Optional[Any] = nn.Parameter(projection_layer.weight ) A_ : int = nn.Parameter(projection_layer.bias ) A_ : str = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) A_ : Optional[int] = hf_wavavec.config.to_dict() A_ : int = tokenizer.pad_token_id A_ : List[str] = tokenizer.bos_token_id A_ : List[str] = tokenizer.eos_token_id A_ : List[str] = "speech_to_text_2" A_ : Tuple = "wav2vec2" A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, 
help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
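Once the script has run, the composite model, feature extractor, and tokenizer all live in the dump folder and load back through the standard API. Assuming the masked class names above correspond to transformers' `SpeechEncoderDecoderModel` and `Speech2Text2Tokenizer` (the config sets model type "speech_to_text_2"), a sketch with a placeholder directory:

from transformers import Speech2Text2Tokenizer, SpeechEncoderDecoderModel

dump_dir = "path/to/pytorch_dump_folder"  # placeholder output directory
model = SpeechEncoderDecoderModel.from_pretrained(dump_dir)
tokenizer = Speech2Text2Tokenizer.from_pretrained(dump_dir)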
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings _lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={ '''help''': ( '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ''' '''to the `max_length` value of the model configuration.''' ) } , ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={ '''help''': ( '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ''' '''to the `num_beams` value of the model configuration.''' ) } , ) __UpperCamelCase = field( default=lowerCamelCase__ , metadata={ '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.''' } , ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : List[str] = super().to_dict() for k, v in d.items(): if isinstance(snake_case , snake_case ): A_ : Union[str, Any] = v.to_dict() return d
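The field identifiers are masked in this dump, but the help strings match transformers' `Seq2SeqTrainingArguments`; under that assumption, a typical instantiation looks like:

from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    sortish_sampler=True,        # "Whether to use SortishSampler or not."
    predict_with_generate=True,  # use generate() for ROUGE/BLEU-style metrics
    generation_max_length=128,   # falls back to the model config's max_length if unset
    generation_num_beams=4,      # falls back to the model config's num_beams if unset
)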
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : """simple docstring""" def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ): '''simple docstring''' A_ : str = parent A_ : str = batch_size A_ : str = seq_length A_ : Any = is_training A_ : Any = use_input_mask A_ : str = use_token_type_ids A_ : Tuple = use_labels A_ : Optional[Any] = vocab_size A_ : Dict = hidden_size A_ : str = num_hidden_layers A_ : Dict = num_attention_heads A_ : str = intermediate_size A_ : int = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Optional[Any] = max_position_embeddings A_ : List[Any] = type_vocab_size A_ : Any = type_sequence_label_size A_ : Dict = initializer_range A_ : Any = num_labels A_ : Optional[int] = num_choices A_ : Optional[Any] = scope A_ : Any = range_bbox def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ : str = bbox[i, j, 3] A_ : Union[str, Any] = bbox[i, j, 1] A_ : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: A_ : Any = bbox[i, j, 2] A_ : Tuple = bbox[i, j, 0] A_ : int = t A_ : int = tf.convert_to_tensor(snake_case ) A_ : Any = None if self.use_input_mask: A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : str = None if self.use_token_type_ids: A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Dict = None A_ : List[Any] = None A_ : List[str] = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : str = ids_tensor([self.batch_size] , self.num_choices ) A_ : int = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ): '''simple docstring''' A_ : Any = TFLayoutLMModel(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) A_ : str = model(snake_case , snake_case , token_type_ids=snake_case ) A_ : List[Any] = model(snake_case , snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ): '''simple docstring''' A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case ) A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Union[str, Any] = self.num_labels A_ : int = TFLayoutLMForSequenceClassification(config=snake_case ) A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.num_labels A_ : str = TFLayoutLMForTokenClassification(config=snake_case ) A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case ) A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Optional[Any] = { "input_ids": 
input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = True __UpperCamelCase = 10 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : Tuple = TFLayoutLMModelTester(self ) A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' pass def __snake_case ( ) -> Optional[Any]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 A_ : Union[str, Any] = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs() # forward pass A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the sequence output on [0, :3, :3] A_ : List[Any] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) ) # test the pooled output on [1, :3] A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs() # forward pass A_ : Dict = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar A_ : List[str] = outputs.loss A_ : Union[str, Any] = (2,) self.assertEqual(loss.shape , snake_case ) # test the shape of the logits A_ : Tuple = outputs.logits A_ : Tuple = (2, 2) self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) # test the shape of the logits A_ : Dict = outputs.logits A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) ) 
self.assertEqual(logits.shape , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs() # forward pass A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the shape of the logits A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , snake_case ) self.assertEqual(outputs.end_logits.shape , snake_case )
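As a quick orientation for the batch construction above: LayoutLM takes one `(x0, y0, x1, y1)` box per token, normalized to a 0-1000 grid, alongside the usual BERT-style inputs. A toy forward pass (token ids and boxes are made up):

import tensorflow as tf
from transformers import TFLayoutLMModel

model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
input_ids = tf.constant([[101, 7592, 2088, 102]])  # [CLS] hello world [SEP], toy ids
bbox = tf.constant(
    [[[0, 0, 0, 0], [63, 71, 92, 88], [95, 71, 134, 88], [1000, 1000, 1000, 1000]]]
)
attention_mask = tf.ones_like(input_ids)
outputs = model(input_ids, bbox=bbox, attention_mask=attention_mask)
print(outputs.last_hidden_state.shape)  # (1, 4, 768)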
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
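A short usage sketch of the helper above; the first two calls need network access to the Hugging Face Hub, and the agent name is arbitrary:

run_template = download_prompt(None, agent_name="my-agent")                 # default repo, "run" template
chat_template = download_prompt(None, agent_name="my-agent", mode="chat")   # default repo, "chat" template
literal = download_prompt("Answer this: <<task>>", agent_name="my-agent")   # contains whitespace, returned as-is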
from collections.abc import Callable

import numpy as np


def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Solve dy/dx = ode_func(x, y) with Heun's method (the explicit trapezoidal
    rule): predict the next value with an Euler step, then correct it with the
    average of the slopes at both ends of the interval.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Euler predictor
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Trapezoidal corrector
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_pred)
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
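A quick sanity check of the solver on dy/dx = y with y(0) = 1, whose exact solution is e^x; Heun's method is second order, so the endpoint error shrinks quadratically with the step size:

import numpy as np

y = heun_method(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(y[-1])        # ~2.7182, close to the exact value below
print(np.exp(1.0))  # 2.71828...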
def heaps(arr: list) -> list:
    """
    Return all permutations of `arr` using Heap's iterative algorithm.
    Each successive permutation differs from the previous one by a single swap.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n  # c encodes the swap state for each position
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
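For example, a three-element list yields all 3! = 6 orderings, each reached from its predecessor by one swap:

print(heaps([1, 2, 3]))
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]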
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Optional[Any] , snake_case :int , snake_case :Tuple=False ): '''simple docstring''' A_ : Optional[int] = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) if return_labels: if model_class in get_values(snake_case ): A_ : Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self :Dict , snake_case :List[str] , snake_case :List[Any]=13 , snake_case :List[str]=7 , snake_case :int=True , snake_case :Optional[int]=True , snake_case :List[str]=True , snake_case :List[Any]=True , snake_case :Tuple=99 , snake_case :Union[str, Any]=32 , snake_case :str=32 , snake_case :Any=2 , snake_case :Optional[int]=4 , snake_case :Tuple=37 , snake_case :List[str]="gelu" , snake_case :Tuple=0.1 , snake_case :List[Any]=0.1 , snake_case :List[Any]=512 , snake_case :List[Any]=16 , snake_case :Dict=2 , snake_case :Union[str, Any]=0.02 , snake_case :Tuple=3 , snake_case :Optional[int]=4 , snake_case :Tuple=None , ): '''simple docstring''' A_ : Optional[Any] = parent A_ : Tuple = batch_size A_ : int = seq_length A_ : Union[str, Any] = is_training A_ : Optional[int] = use_input_mask A_ : Union[str, Any] = use_token_type_ids A_ : Tuple = use_labels A_ : str = vocab_size A_ : Dict = hidden_size A_ : Union[str, Any] = num_hidden_layers A_ : List[str] = num_attention_heads A_ : List[Any] = intermediate_size A_ : Optional[int] = hidden_act A_ : str = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Dict = max_position_embeddings A_ : Any = type_vocab_size A_ : Optional[Any] = type_sequence_label_size A_ : Union[str, Any] = initializer_range A_ : Optional[Any] = num_labels A_ : Optional[int] = num_choices A_ : Any = scope A_ : List[str] = embedding_size def SCREAMING_SNAKE_CASE ( self :Union[str, Any] 
): '''simple docstring''' A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Union[str, Any] = None if self.use_input_mask: A_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Optional[Any] = None if self.use_token_type_ids: A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : str = None A_ : List[Any] = None A_ : Union[str, Any] = None if self.use_labels: A_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : Any = ids_tensor([self.batch_size] , self.num_choices ) A_ : Union[str, Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self :Any , snake_case :int , snake_case :Optional[int] , snake_case :Dict , snake_case :List[str] , snake_case :str , snake_case :Union[str, Any] , snake_case :Optional[Any] ): '''simple docstring''' A_ : int = TFMobileBertModel(config=snake_case ) A_ : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ : Dict = model(snake_case ) A_ : Union[str, Any] = [input_ids, input_mask] A_ : Optional[int] = model(snake_case ) A_ : str = model(snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[Any] , snake_case :Dict , snake_case :Dict , snake_case :List[Any] , snake_case :List[Any] , snake_case :int , snake_case :Tuple ): '''simple docstring''' A_ : str = TFMobileBertForMaskedLM(config=snake_case ) A_ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ : Optional[int] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :Optional[int] , snake_case :int , snake_case :List[Any] , snake_case :Optional[Any] , snake_case :Any , snake_case :Dict ): '''simple docstring''' A_ : List[Any] = TFMobileBertForNextSentencePrediction(config=snake_case ) A_ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ : List[str] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self :str , snake_case :Tuple , snake_case :str , snake_case :int , snake_case :Tuple , snake_case :Dict , snake_case :int , snake_case :int ): '''simple docstring''' A_ : int = TFMobileBertForPreTraining(config=snake_case ) A_ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ : Dict = model(snake_case ) self.parent.assertEqual( result.prediction_logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :Any , snake_case :str , snake_case :Union[str, Any] , snake_case :List[str] , snake_case :Any , snake_case :Tuple , snake_case :Any ): '''simple docstring''' A_ : Optional[int] = self.num_labels A_ : List[Any] = TFMobileBertForSequenceClassification(config=snake_case ) A_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ : str = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :str , snake_case :List[Any] , snake_case :Union[str, Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Dict , snake_case :str ): '''simple docstring''' A_ : Optional[Any] = self.num_choices A_ : List[Any] = TFMobileBertForMultipleChoice(config=snake_case ) A_ : List[str] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) A_ : Optional[Any] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) A_ : Any = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) ) A_ : Dict = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A_ : Dict = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :Any , snake_case :Dict , snake_case :List[str] , snake_case :Optional[Any] , snake_case :str ): '''simple docstring''' A_ : List[Any] = self.num_labels A_ : Dict = TFMobileBertForTokenClassification(config=snake_case ) A_ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ : Optional[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Any , snake_case :Optional[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :int ): '''simple docstring''' A_ : List[Any] = TFMobileBertForQuestionAnswering(config=snake_case ) A_ : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ : Tuple = model(snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : List[Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Any = config_and_inputs A_ : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Tuple = TFMobileBertModelTest.TFMobileBertModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_mobilebert_model(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: A_ : str = TFMobileBertModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @require_tf class __magic_name__ ( unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Union[str, Any] = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" ) A_ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ : Dict = model(snake_case )[0] A_ : Dict = [1, 6, 30_522] self.assertEqual(output.shape , snake_case ) A_ : List[str] = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1e-4 )
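The final integration test above pins down concrete numbers; the same check can be run directly, assuming TensorFlow and network access to the `google/mobilebert-uncased` checkpoint:

import tensorflow as tf
from transformers import TFMobileBertForPreTraining

model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
print(output.shape)  # (1, 6, 30522): one prediction logit per position and vocabulary entry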
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : List[Any] = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase : Any = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = RobertaTokenizer def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ): '''simple docstring''' super().__init__( snake_case , snake_case 
, tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , ) A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**snake_case ) A_ : Optional[int] = add_prefix_space A_ : Optional[int] = "post_processor" A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case ) if tokenizer_component_instance: A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : List[Any] = tuple(state["sep"] ) if "cls" in state: A_ : Optional[Any] = tuple(state["cls"] ) A_ : Tuple = False if state.get("add_prefix_space" , snake_case ) != add_prefix_space: A_ : List[Any] = add_prefix_space A_ : Optional[int] = True if state.get("trim_offsets" , snake_case ) != trim_offsets: A_ : List[str] = trim_offsets A_ : Any = True if changes_to_apply: A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) ) A_ : Any = component_class(**snake_case ) setattr(self.backend_tokenizer , snake_case , snake_case ) @property def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Dict ): '''simple docstring''' A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value A_ : Any = value def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ): '''simple docstring''' A_ : Any = kwargs.get("is_split_into_words" , snake_case ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case , **snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ): '''simple docstring''' A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ): '''simple docstring''' A_ : Any = [self.sep_token_id] A_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
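The `is_split_into_words` assertion above is why pre-tokenized input requires `add_prefix_space=True`. A small sketch with the public fast tokenizer class, assuming network access to the `roberta-base` checkpoint:

from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
# Byte-level BPE marks word-initial pieces with 'Ġ' (the encoded leading space).
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))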
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
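A usage sketch for the pipeline above; `harmonai/maestro-150k` is one public checkpoint for this unconditional audio pipeline, though any compatible UNet/scheduler pair should work:

import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
waveform = output.audios[0]  # numpy array of shape (channels, samples)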
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCAmelCase : int = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' _lowerCAmelCase : Tuple = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' _lowerCAmelCase : int = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case ) }
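The metric above aggregates n-gram matches over a whole corpus; nltk also exposes a sentence-level variant, which makes the min(recall, precision) definition easy to probe on a single pair:

from nltk.translate import gleu_score

hypothesis = "the cat sat on the mat".split()
reference = "the cat is on the mat".split()

# sentence_gleu takes a list of references for one hypothesis;
# corpus_gleu (used by the metric above) takes one such list per hypothesis.
print(gleu_score.sentence_gleu([reference], hypothesis))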
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __magic_name__ : """simple docstring""" def __init__( self :Dict , snake_case :Optional[Any] , snake_case :Tuple=2 , snake_case :Union[str, Any]=True , snake_case :Dict=False , snake_case :int=10 , snake_case :List[str]=3 , snake_case :List[str]=32 * 8 , snake_case :Any=32 * 8 , snake_case :List[Any]=4 , snake_case :Tuple=64 , ): '''simple docstring''' A_ : Optional[int] = parent A_ : int = batch_size A_ : Optional[Any] = is_training A_ : Tuple = use_auxiliary_loss A_ : Dict = num_queries A_ : List[Any] = num_channels A_ : Dict = min_size A_ : str = max_size A_ : Optional[Any] = num_labels A_ : Optional[int] = hidden_dim A_ : Any = hidden_dim def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case ) A_ : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case ) A_ : List[str] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case ) > 0.5 ).float() A_ : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=snake_case ) > 0.5).long() A_ : int = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[Any] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) A_ : Optional[int] = self.num_queries A_ : Optional[int] = self.num_labels A_ : List[str] = [1, 1, 1, 1] A_ : Optional[Any] = self.num_channels A_ : Optional[Any] = 64 A_ : Optional[int] = 128 A_ : Tuple = self.hidden_dim A_ : Union[str, Any] = self.hidden_dim A_ : List[str] = self.hidden_dim return config def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ , A_ , A_ , A_ , A_ : List[str] = self.prepare_config_and_inputs() A_ : List[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Tuple , snake_case :Tuple ): '''simple docstring''' A_ : List[str] = output.encoder_hidden_states A_ : Optional[int] = output.pixel_decoder_hidden_states A_ : int = output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case ) , config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[str] , snake_case :Tuple , snake_case :int , snake_case :str=False ): '''simple docstring''' with torch.no_grad(): A_ : Optional[int] = MaskaFormerModel(config=snake_case ) model.to(snake_case ) model.eval() A_ : List[str] = model(pixel_values=snake_case , pixel_mask=snake_case ) A_ : Tuple = 
model(snake_case , output_hidden_states=snake_case ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :List[Any] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Optional[Any] , snake_case :Dict ): '''simple docstring''' A_ : Any = MaskaFormerForUniversalSegmentation(config=snake_case ) model.to(snake_case ) model.eval() def comm_check_on_output(snake_case :int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A_ : Optional[Any] = model(pixel_values=snake_case , pixel_mask=snake_case ) A_ : str = model(snake_case ) comm_check_on_output(snake_case ) A_ : Union[str, Any] = model( pixel_values=snake_case , pixel_mask=snake_case , mask_labels=snake_case , class_labels=snake_case ) comm_check_on_output(snake_case ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCamelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : Tuple = MaskaFormerModelTester(self ) A_ : int = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(snake_case , **snake_case , output_hidden_states=snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case ) @unittest.skip(reason="Mask2Former does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' pass @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="Mask2Former is not a generative model" ) def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): 
'''simple docstring''' pass @unittest.skip(reason="Mask2Former does not use token embeddings" ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Tuple = model_class(snake_case ) A_ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[Any] = [*signature.parameters.keys()] A_ : Optional[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: A_ : Any = MaskaFormerModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : List[Any] = (self.model_tester.min_size,) * 2 A_ : Optional[int] = { "pixel_values": torch.randn((2, 3, *size) , device=snake_case ), "mask_labels": torch.randn((2, 10, *size) , device=snake_case ), "class_labels": torch.zeros(2 , 10 , device=snake_case ).long(), } A_ : Dict = self.model_tester.get_config() A_ : Optional[int] = MaskaFormerForUniversalSegmentation(snake_case ).to(snake_case ) A_ : List[str] = model(**snake_case ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(snake_case , **snake_case , output_hidden_states=snake_case ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : int = model_class(snake_case ).to(snake_case ) A_ : Any = model(**snake_case , output_attentions=snake_case ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' if not self.model_tester.is_training: return A_ : List[str] = self.all_model_classes[1] A_ , A_ , A_ , A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() A_ : List[str] = model_class(snake_case ) model.to(snake_case ) model.train() A_ : Union[str, Any] = model(snake_case , mask_labels=snake_case , class_labels=snake_case ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' A_ : Dict = self.all_model_classes[1] A_ , A_ , A_ , A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() A_ : List[str] = True A_ : str = True A_ : Optional[int] = model_class(snake_case ).to(snake_case ) model.train() A_ : Optional[Any] = model(snake_case , mask_labels=snake_case , class_labels=snake_case ) A_ : Dict = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A_ : Optional[int] = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() A_ : List[Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A_ : 
Optional[Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowerCAmelCase : List[str] = 1e-4 def __snake_case ( ) -> Optional[Any]: A_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class __magic_name__ ( unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(snake_case ) A_ : Dict = self.default_image_processor A_ : Union[str, Any] = prepare_img() A_ : Any = image_processor(snake_case , return_tensors="pt" ).to(snake_case ) A_ : Dict = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case , (1, 3, 384, 384) ) with torch.no_grad(): A_ : Dict = model(**snake_case ) A_ : Any = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(snake_case ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case ) ) A_ : List[Any] = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(snake_case ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case ) ) A_ : Optional[int] = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(snake_case ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case , atol=snake_case ) ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case ).eval() A_ : Dict = self.default_image_processor A_ : Any = prepare_img() A_ : int = image_processor(snake_case , return_tensors="pt" ).to(snake_case ) A_ : Tuple = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case , (1, 3, 384, 384) ) with torch.no_grad(): A_ : Optional[Any] = model(**snake_case ) # masks_queries_logits A_ : Any = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) A_ : int = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] A_ : str = torch.tensor(snake_case ).to(snake_case ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case , atol=snake_case ) ) # class_queries_logits A_ : Dict = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) A_ : Dict = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, 
-7.7293, -3.0275], ] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case , atol=snake_case ) ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : str = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case ).eval() A_ : List[str] = self.default_image_processor A_ : List[Any] = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) A_ : Dict = inputs["pixel_values"].to(snake_case ) A_ : Optional[Any] = [el.to(snake_case ) for el in inputs["mask_labels"]] A_ : Optional[int] = [el.to(snake_case ) for el in inputs["class_labels"]] with torch.no_grad(): A_ : List[str] = model(**snake_case ) self.assertTrue(outputs.loss is not None )
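A sketch of the same checkpoint outside the test suite, using the public `Mask2Former*` class names from `transformers` and assuming the COCO fixture image is available locally; the post-processing call is one of several the image processor offers:

import torch
from PIL import Image
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

ckpt = "facebook/mask2former-swin-small-coco-instance"
processor = Mask2FormerImageProcessor.from_pretrained(ckpt)
model = Mask2FormerForUniversalSegmentation.from_pretrained(ckpt)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Collapse the query logits into a per-pixel instance map at the original size.
result = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(result["segmentation"].shape)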
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    # The parquet_path fixture points at a file with 4 rows and 3 columns.
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # keep_in_memory=True should allocate Arrow memory; False should memory-map instead.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    # When explicit features are passed, the reader should cast the columns accordingly.
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # With no explicit split requested, the reader defaults to "train".
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    # The reader accepts either a single path or a list of paths.
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        # Without an explicit split, read the file into both "train" and "test" splits.
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    # The written file should round-trip to the exact same Arrow table.
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    # Image features must survive a write/read round trip, both in-memory and streaming.
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    # Image and audio datasets get smaller Parquet row groups; plain datasets use the default (None).
    assert get_writer_batch_size(feature) == expected