Dataset schema (one record per code/style_context pair):

| field | dtype | range |
| --- | --- | --- |
| code | string | lengths 82 to 53.2k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
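Each record pairs a `code` snippet with a `style_context` snippet plus a binary `label`; in the rows shown below, `label` is 1 exactly when the two codestyle IDs match. A minimal sketch for iterating rows of this shape, assuming (hypothetically) JSON Lines storage in a file named `pairs.jsonl` (the actual storage format is not specified in this dump):

```python
import json

# Iterate records of the pair-classification dataset described above.
# "pairs.jsonl" is a placeholder file name, not part of the original dump.
with open("pairs.jsonl") as f:
    for line in f:
        row = json.loads(line)
        same_style = row["code_codestyle"] == row["style_context_codestyle"]
        # In the rows shown here, label == 1 coincides with matching style IDs.
        assert (row["label"] == 1) == same_style
```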
code:

```python
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __lowerCAmelCase(_a):
    lowerCamelCase_: str = ["image_processor", "tokenizer"]
    lowerCamelCase_: Dict = "FlavaImageProcessor"
    lowerCamelCase_: List[str] = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, __magic_name__=None, __magic_name__=None, **__magic_name__) -> List[Any]:
        """simple docstring"""
        snake_case_: Any = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                __magic_name__,
            )
            snake_case_: Optional[Any] = kwargs.pop("feature_extractor")
        snake_case_: Optional[int] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(__magic_name__, __magic_name__)
        snake_case_: Tuple = self.image_processor

    def __call__(self, __magic_name__=None, __magic_name__=None, __magic_name__=True, __magic_name__=False, __magic_name__=False, __magic_name__=None, __magic_name__=0, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=False, __magic_name__=False, __magic_name__=False, __magic_name__=False, __magic_name__=True, __magic_name__=None, **__magic_name__) -> Dict:
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            snake_case_: Optional[int] = self.tokenizer(text=__magic_name__, add_special_tokens=__magic_name__, padding=__magic_name__, truncation=__magic_name__, max_length=__magic_name__, stride=__magic_name__, pad_to_multiple_of=__magic_name__, return_token_type_ids=__magic_name__, return_attention_mask=__magic_name__, return_overflowing_tokens=__magic_name__, return_special_tokens_mask=__magic_name__, return_offsets_mapping=__magic_name__, return_length=__magic_name__, verbose=__magic_name__, return_tensors=__magic_name__, **__magic_name__)
        if images is not None:
            snake_case_: Any = self.image_processor(__magic_name__, return_image_mask=__magic_name__, return_codebook_pixels=__magic_name__, return_tensors=__magic_name__, **__magic_name__)
        if text is not None and images is not None:
            encoding.update(__magic_name__)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__magic_name__), tensor_type=__magic_name__)

    def lowerCamelCase(self, *__magic_name__, **__magic_name__) -> Optional[Any]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*__magic_name__, **__magic_name__)

    def lowerCamelCase(self, *__magic_name__, **__magic_name__) -> List[Any]:
        """simple docstring"""
        return self.tokenizer.decode(*__magic_name__, **__magic_name__)

    @property
    def lowerCamelCase(self) -> Any:
        """simple docstring"""
        snake_case_: Tuple = self.tokenizer.model_input_names
        snake_case_: List[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def lowerCamelCase(self) -> str:
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            __magic_name__,
        )
        return self.image_processor_class

    @property
    def lowerCamelCase(self) -> List[str]:
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            __magic_name__,
        )
        return self.image_processor
```
code_codestyle: 60
style_context:

```python
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
```
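A quick sanity check for the snippet above: the returned roots of 5x² + 6x + 1 are -0.2 and -1.0, and they can be verified against Vieta's formulas:

```python
# For a*x^2 + b*x + c the roots satisfy r1 + r2 == -b/a and r1 * r2 == c/a.
r1, r2 = quadratic_roots(a=5, b=6, c=1)  # -> (-0.2, -1.0)
assert abs((r1 + r2) - (-6 / 5)) < 1e-9
assert abs((r1 * r2) - (1 / 5)) < 1e-9
```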
style_context_codestyle: 454
label: 0
code:

```python
"""simple docstring"""
import tempfile
import unittest

import numpy as np

from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionPipeline,
    PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin

if is_onnx_available():
    import onnxruntime as ort


class _a(UpperCamelCase__, unittest.TestCase):
    _lowercase: Union[str, Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def lowerCamelCase_(self: Optional[int], UpperCamelCase_: int = 0) -> Tuple:
        """simple docstring"""
        lowercase__ = np.random.RandomState(_lowerCAmelCase)
        lowercase__ = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def lowerCamelCase_(self: Union[str, Any]) -> Any:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = pipe(**_lowerCAmelCase).images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowercase__ = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def lowerCamelCase_(self: List[str]) -> List[Any]:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        lowercase__ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=_lowerCAmelCase)
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = pipe(**_lowerCAmelCase).images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowercase__ = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def lowerCamelCase_(self: Dict) -> Tuple:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = pipe(**_lowerCAmelCase).images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowercase__ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def lowerCamelCase_(self: Optional[Any]) -> Any:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        lowercase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = pipe(**_lowerCAmelCase).images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowercase__ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def lowerCamelCase_(self: List[str]) -> List[str]:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        lowercase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = pipe(**_lowerCAmelCase).images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowercase__ = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def lowerCamelCase_(self: Tuple) -> List[str]:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = pipe(**_lowerCAmelCase).images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        lowercase__ = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def lowerCamelCase_(self: Dict) -> Any:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = 3 * [inputs["prompt"]]
        # forward
        lowercase__ = pipe(**_lowerCAmelCase)
        lowercase__ = output.images[0, -3:, -3:, -1]
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = 3 * [inputs.pop("prompt")]
        lowercase__ = pipe.tokenizer(_lowerCAmelCase, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=_lowerCAmelCase, return_tensors="np")
        lowercase__ = text_inputs["input_ids"]
        lowercase__ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
        lowercase__ = prompt_embeds
        # forward
        lowercase__ = pipe(**_lowerCAmelCase)
        lowercase__ = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4

    def lowerCamelCase_(self: Tuple) -> Union[str, Any]:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = 3 * ["this is a negative prompt"]
        lowercase__ = negative_prompt
        lowercase__ = 3 * [inputs["prompt"]]
        # forward
        lowercase__ = pipe(**_lowerCAmelCase)
        lowercase__ = output.images[0, -3:, -3:, -1]
        lowercase__ = self.get_dummy_inputs()
        lowercase__ = 3 * [inputs.pop("prompt")]
        lowercase__ = []
        for p in [prompt, negative_prompt]:
            lowercase__ = pipe.tokenizer(_lowerCAmelCase, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=_lowerCAmelCase, return_tensors="np")
            lowercase__ = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
        lowercase__, lowercase__ = embeds
        # forward
        lowercase__ = pipe(**_lowerCAmelCase)
        lowercase__ = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4


@nightly
@require_onnxruntime
@require_torch_gpu
class _a(unittest.TestCase):
    @property
    def lowerCamelCase_(self: Union[str, Any]) -> Dict:
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def lowerCamelCase_(self: Any) -> Any:
        """simple docstring"""
        lowercase__ = ort.SessionOptions()
        lowercase__ = False
        return options

    def lowerCamelCase_(self: List[Any]) -> str:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=_lowerCAmelCase, feature_extractor=_lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options)
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        lowercase__ = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def lowerCamelCase_(self: Union[str, Any]) -> List[str]:
        """simple docstring"""
        lowercase__ = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=_lowerCAmelCase, safety_checker=_lowerCAmelCase, feature_extractor=_lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options)
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = "open neural network exchange"
        lowercase__ = np.random.RandomState(0)
        lowercase__ = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=_lowerCAmelCase, output_type="np")
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def lowerCamelCase_(self: Dict) -> Optional[int]:
        """simple docstring"""
        lowercase__ = LMSDiscreteScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=_lowerCAmelCase, safety_checker=_lowerCAmelCase, feature_extractor=_lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options)
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = "open neural network exchange"
        lowercase__ = np.random.RandomState(0)
        lowercase__ = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=_lowerCAmelCase, output_type="np")
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def lowerCamelCase_(self: Optional[Any]) -> str:
        """simple docstring"""
        lowercase__ = 0

        def test_callback_fn(UpperCamelCase_: Optional[Any], UpperCamelCase_: Optional[Any], UpperCamelCase_: Optional[Any]) -> None:
            lowercase__ = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                lowercase__ = latents[0, -3:, -3:, -1]
                lowercase__ = np.array([-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                lowercase__ = latents[0, -3:, -3:, -1]
                lowercase__ = np.array([-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        lowercase__ = False
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=_lowerCAmelCase, feature_extractor=_lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=_lowerCAmelCase)
        lowercase__ = "Andromeda galaxy in a bottle"
        lowercase__ = np.random.RandomState(0)
        pipe(prompt=_lowerCAmelCase, num_inference_steps=5, guidance_scale=7.5, generator=_lowerCAmelCase, callback=_lowerCAmelCase, callback_steps=1)
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def lowerCamelCase_(self: Optional[int]) -> List[str]:
        """simple docstring"""
        lowercase__ = OnnxStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=_lowerCAmelCase, feature_extractor=_lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options)
        assert isinstance(_lowerCAmelCase, _lowerCAmelCase)
        assert pipe.safety_checker is None
        lowercase__ = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(_lowerCAmelCase)
            lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        lowercase__ = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
```
code_codestyle: 709
style_context:

```python
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
```
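An aside on why the two checks above work: for prime p and b not divisible by p, Fermat's little theorem gives b^(p-1) ≡ 1 (mod p), so b^(p-2) is the modular inverse of b. A small illustration using the function above:

```python
# Modular inverse via Fermat's little theorem: b^(p-2) ≡ b^(-1) (mod p) for prime p.
p, b = 701, 10
inv_b = binary_exponentiation(b, p - 2, p)
assert (b * inv_b) % p == 1  # inv_b really is the inverse of b modulo p
```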
style_context_codestyle: 429
label: 0
code:

```python
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowercase__: List[str] = logging.get_logger(__name__)

lowercase__: Union[str, Any] = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class lowercase_(__lowercase):
    """simple docstring"""

    UpperCAmelCase_: Dict = "ibert"

    def __init__(self, __SCREAMING_SNAKE_CASE=30522, __SCREAMING_SNAKE_CASE=768, __SCREAMING_SNAKE_CASE=12, __SCREAMING_SNAKE_CASE=12, __SCREAMING_SNAKE_CASE=3072, __SCREAMING_SNAKE_CASE="gelu", __SCREAMING_SNAKE_CASE=0.1, __SCREAMING_SNAKE_CASE=0.1, __SCREAMING_SNAKE_CASE=512, __SCREAMING_SNAKE_CASE=2, __SCREAMING_SNAKE_CASE=0.02, __SCREAMING_SNAKE_CASE=1e-12, __SCREAMING_SNAKE_CASE=1, __SCREAMING_SNAKE_CASE=0, __SCREAMING_SNAKE_CASE=2, __SCREAMING_SNAKE_CASE="absolute", __SCREAMING_SNAKE_CASE=False, __SCREAMING_SNAKE_CASE="none", **__SCREAMING_SNAKE_CASE) -> Any:
        super().__init__(pad_token_id=_A, bos_token_id=_A, eos_token_id=_A, **_A)
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = hidden_act
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = type_vocab_size
        lowerCAmelCase = initializer_range
        lowerCAmelCase = layer_norm_eps
        lowerCAmelCase = position_embedding_type
        lowerCAmelCase = quant_mode
        lowerCAmelCase = force_dequant


class lowercase_(__lowercase):
    """simple docstring"""

    @property
    def SCREAMING_SNAKE_CASE_(self) -> str:
        if self.task == "multiple-choice":
            lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            lowerCAmelCase = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
```
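The two classes above appear to correspond to transformers' `IBertConfig` and its ONNX export config (the class names are mangled in this row). A hedged usage sketch against the public transformers API, which exposes the same `quant_mode` parameter seen in the row:

```python
# Hedged sketch: instantiating the I-BERT configuration via the public API
# that this name-mangled row appears to implement.
from transformers import IBertConfig

config = IBertConfig(quant_mode=True)  # enable integer-only quantization mode
print(config.model_type)  # "ibert"
```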
code_codestyle: 312
style_context:

```python
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


_lowerCAmelCase = logging.getLogger(__name__)


def _snake_case(__snake_case, __snake_case):
    return (preds == labels).mean()


@dataclass
class lowerCAmelCase_:
    UpperCAmelCase = field(metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    UpperCAmelCase = field(default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    UpperCAmelCase = field(default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    UpperCAmelCase = field(default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"})


@dataclass
class lowerCAmelCase_:
    UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."})
    UpperCAmelCase = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    UpperCAmelCase = field(default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"})


def _snake_case():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fpaa,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", __snake_case)
    # Set seed
    set_seed(training_args.seed)
    try:
        _UpperCamelCase = processors[data_args.task_name]()
        _UpperCamelCase = processor.get_labels()
        _UpperCamelCase = len(__snake_case)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _UpperCamelCase = AutoConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__snake_case, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir)
    _UpperCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    _UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=__snake_case, cache_dir=model_args.cache_dir)
    # Get datasets
    _UpperCamelCase = (
        MultipleChoiceDataset(data_dir=data_args.data_dir, tokenizer=__snake_case, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train)
        if training_args.do_train
        else None
    )
    _UpperCamelCase = (
        MultipleChoiceDataset(data_dir=data_args.data_dir, tokenizer=__snake_case, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev)
        if training_args.do_eval
        else None
    )

    def compute_metrics(__snake_case) -> Dict:
        _UpperCamelCase = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(__snake_case, p.label_ids)}

    # Data collator
    _UpperCamelCase = DataCollatorWithPadding(__snake_case, pad_to_multiple_of=8) if training_args.fpaa else None
    # Initialize our Trainer
    _UpperCamelCase = Trainer(model=__snake_case, args=__snake_case, train_dataset=__snake_case, eval_dataset=__snake_case, compute_metrics=__snake_case, data_collator=__snake_case)
    # Training
    if training_args.do_train:
        trainer.train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    _UpperCamelCase = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        _UpperCamelCase = trainer.evaluate()
        _UpperCamelCase = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(__snake_case, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", __snake_case, __snake_case)
                    writer.write("%s = %s\n" % (key, value))
            results.update(__snake_case)
    return results


def _snake_case(__snake_case):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
```
style_context_codestyle: 10
label: 0
code:

```python
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging


lowerCamelCase: Optional[int] = logging.get_logger(__name__)

lowerCamelCase: Optional[Any] = "▁"

lowerCamelCase: List[str] = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

lowerCamelCase: List[Any] = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

lowerCamelCase: Tuple = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
lowerCamelCase: List[str] = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"],
}
# fmt: on


class _lowerCAmelCase(__UpperCAmelCase):
    __SCREAMING_SNAKE_CASE: Tuple = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __SCREAMING_SNAKE_CASE: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE: Any = ["input_ids", "attention_mask"]
    __SCREAMING_SNAKE_CASE: List[int] = []
    __SCREAMING_SNAKE_CASE: List[int] = []

    def __init__(self, lowercase, lowercase, lowercase=None, lowercase=None, lowercase="<s>", lowercase="</s>", lowercase="</s>", lowercase="<pad>", lowercase="<unk>", lowercase="m2m100", lowercase=None, lowercase=8, **lowercase):
        A_: int = {} if sp_model_kwargs is None else sp_model_kwargs
        A_: Any = language_codes
        A_: Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes]
        A_: int = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        A_: int = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lowercase)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lowercase) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(src_lang=lowercase, tgt_lang=lowercase, bos_token=lowercase, eos_token=lowercase, sep_token=lowercase, unk_token=lowercase, pad_token=lowercase, language_codes=lowercase, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=lowercase, **lowercase)
        A_: List[Any] = vocab_file
        A_: Optional[int] = load_json(lowercase)
        A_: str = {v: k for k, v in self.encoder.items()}
        A_: Optional[Any] = spm_file
        A_: List[Any] = load_spm(lowercase, self.sp_model_kwargs)
        A_: int = len(self.encoder)
        A_: List[str] = {self.get_lang_token(lowercase): self.encoder_size + i for i, lang_code in enumerate(lowercase)}
        A_: int = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase)}
        A_: List[str] = {v: k for k, v in self.lang_token_to_id.items()}
        A_: Dict = src_lang if src_lang is not None else "en"
        A_: Tuple = tgt_lang
        A_: Optional[int] = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        A_: Union[str, Any] = num_madeup_words

    @property
    def _a(self):
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def _a(self):
        return self._src_lang

    @src_lang.setter
    def _a(self, lowercase):
        A_: Optional[int] = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _a(self, lowercase):
        return self.sp_model.encode(lowercase, out_type=lowercase)

    def _a(self, lowercase):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(lowercase, self.encoder[self.unk_token])

    def _a(self, lowercase):
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(lowercase, self.unk_token)

    def _a(self, lowercase):
        A_: str = []
        A_: Any = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(lowercase) + token
                A_: str = []
            else:
                current_sub_tokens.append(lowercase)
        out_string += self.sp_model.decode(lowercase)
        return out_string.strip()

    def _a(self, lowercase, lowercase=None, lowercase=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_a=lowercase, token_ids_a=lowercase, already_has_special_tokens=lowercase)
        A_: str = [1] * len(self.prefix_tokens)
        A_: Dict = [1] * len(self.suffix_tokens)
        if token_ids_a is None:
            return prefix_ones + ([0] * len(lowercase)) + suffix_ones
        return prefix_ones + ([0] * len(lowercase)) + ([0] * len(lowercase)) + suffix_ones

    def _a(self, lowercase, lowercase=None):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def _a(self):
        A_: Union[str, Any] = {self.convert_ids_to_tokens(lowercase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        A_: Any = self.__dict__.copy()
        A_: Any = None
        return state

    def __setstate__(self, lowercase):
        A_: Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            A_: int = {}
        A_: Dict = load_spm(self.spm_file, self.sp_model_kwargs)

    def _a(self, lowercase, lowercase=None):
        A_: Optional[Any] = Path(lowercase)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        A_: Optional[int] = save_dir / ((filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"])
        A_: List[str] = save_dir / ((filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"])
        save_json(self.encoder, lowercase)
        if os.path.abspath(self.spm_file) != os.path.abspath(lowercase) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, lowercase)
        elif not os.path.isfile(self.spm_file):
            with open(lowercase, "wb") as fi:
                A_: Any = self.sp_model.serialized_model_proto()
                fi.write(lowercase)
        return (str(lowercase), str(lowercase))

    def _a(self, lowercase, lowercase="en", lowercase=None, lowercase="ro", **lowercase):
        A_: Any = src_lang
        A_: Optional[Any] = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seqaseq_batch(lowercase, lowercase, **lowercase)

    def _a(self, lowercase, lowercase, lowercase, **lowercase):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        A_: Optional[int] = src_lang
        A_: Optional[Any] = self(lowercase, add_special_tokens=lowercase, **lowercase)
        A_: int = self.get_lang_id(lowercase)
        A_: Dict = tgt_lang_id
        return inputs

    def _a(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _a(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def _a(self, lowercase):
        A_: List[Any] = self.get_lang_token(lowercase)
        A_: Union[str, Any] = self.lang_token_to_id[lang_token]
        A_: List[str] = [self.cur_lang_id]
        A_: Union[str, Any] = [self.eos_token_id]

    def _a(self, lowercase):
        A_: List[str] = self.get_lang_token(lowercase)
        A_: List[str] = self.lang_token_to_id[lang_token]
        A_: List[Any] = [self.cur_lang_id]
        A_: List[str] = [self.eos_token_id]

    def _a(self, lowercase):
        return self.lang_code_to_token[lang]

    def _a(self, lowercase):
        A_: Optional[Any] = self.get_lang_token(lowercase)
        return self.lang_token_to_id[lang_token]


def a(lowerCamelCase__, lowerCamelCase__):
    """simple docstring"""
    A_: str = sentencepiece.SentencePieceProcessor(**lowerCamelCase__)
    spm.Load(str(lowerCamelCase__))
    return spm


def a(lowerCamelCase__):
    """simple docstring"""
    with open(lowerCamelCase__, "r") as f:
        return json.load(lowerCamelCase__)


def a(lowerCamelCase__, lowerCamelCase__):
    """simple docstring"""
    with open(lowerCamelCase__, "w") as f:
        json.dump(lowerCamelCase__, lowerCamelCase__, indent=2)
```
code_codestyle: 686
style_context:

```python
"""simple docstring"""
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
lowerCamelCase: Any = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def a(lowerCamelCase__):
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    lowerCamelCase: Tuple = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    lowerCamelCase: List[Any] = parser.parse_args()

    if args.check_lib:
        lowerCamelCase: Union[str, Any] = importlib.import_module("transformers")
        lowerCamelCase: Union[str, Any] = Path(transformers_module.__file__).parent
    else:
        lowerCamelCase: List[str] = Path.cwd() / "build/lib/transformers"

    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
```
style_context_codestyle: 686
label: 1
code:

```python
def prefix_function(input_string: str) -> list[int]:
    """Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of the
    longest proper prefix of input_string[:i + 1] that is also its suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
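A quick illustration of the prefix function above:

```python
# prefix_function("abab"): at each position, the length of the longest proper
# prefix that is also a suffix -- "a" and "ab" reappear at positions 2 and 3.
assert prefix_function("abab") == [0, 0, 1, 2]
assert longest_prefix("abab") == 2
```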
code_codestyle: 35
style_context:

```python
"""simple docstring"""
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def lowerCamelCase(_SCREAMING_SNAKE_CASE: Dataset, _SCREAMING_SNAKE_CASE: Dict[str, str]):
    __a: int = args.log_outputs
    __a: Dict = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    __a: int = load_metric("wer")
    __a: Tuple = load_metric("cer")
    # compute metrics
    __a: Tuple = wer.compute(references=result["target"], predictions=result["prediction"])
    __a: Union[str, Any] = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    __a: Any = f"WER: {wer_result}\nCER: {cer_result}"
    print(_SCREAMING_SNAKE_CASE)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(_SCREAMING_SNAKE_CASE)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        __a: Optional[Any] = f"log_{dataset_id}_predictions.txt"
        __a: int = f"log_{dataset_id}_targets.txt"
        with open(_SCREAMING_SNAKE_CASE, "w") as p, open(_SCREAMING_SNAKE_CASE, "w") as t:
            # mapping function to write output
            def write_to_file(_SCREAMING_SNAKE_CASE: Optional[int], _SCREAMING_SNAKE_CASE: Tuple):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(_SCREAMING_SNAKE_CASE, with_indices=_SCREAMING_SNAKE_CASE)


def lowerCamelCase(_SCREAMING_SNAKE_CASE: str):
    __a: List[Any] = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    __a: int = re.sub(_SCREAMING_SNAKE_CASE, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    __a: Tuple = ["\n\n", "\n", " ", " "]
    for t in token_sequences_to_ignore:
        __a: Optional[int] = " ".join(text.split(_SCREAMING_SNAKE_CASE))
    return text


def lowerCamelCase(_SCREAMING_SNAKE_CASE: List[Any]):
    # load dataset
    __a: Any = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=_SCREAMING_SNAKE_CASE)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    __a: Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id)
    __a: Union[str, Any] = feature_extractor.sampling_rate
    # resample audio
    __a: Any = dataset.cast_column("audio", Audio(sampling_rate=_SCREAMING_SNAKE_CASE))
    # load eval pipeline
    if args.device is None:
        __a: Any = 0 if torch.cuda.is_available() else -1
    __a: List[Any] = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(_SCREAMING_SNAKE_CASE: str):
        __a: Any = asr(batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        __a: List[Any] = prediction["text"]
        __a: Optional[Any] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    __a: Any = dataset.map(_SCREAMING_SNAKE_CASE, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)


if __name__ == "__main__":
    __lowercase: List[Any] = argparse.ArgumentParser()
    parser.add_argument("--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers")
    parser.add_argument("--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets")
    parser.add_argument("--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice")
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument("--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds.")
    parser.add_argument("--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second.")
    parser.add_argument("--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis.")
    parser.add_argument("--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.")
    __lowercase: Any = parser.parse_args()

    main(args)
```
style_context_codestyle: 476
label: 0
"""simple docstring""" from collections import deque class __lowercase : '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : int = process_name # process name __a : str = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __a : List[Any] = arrival_time __a : Dict = burst_time # remaining burst time __a : List[Any] = 0 # total time of the process wait in ready queue __a : Any = 0 # time from arrival time to completion time class __lowercase : '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): # total number of mlfq's queues __a : int = number_of_queues # time slice of queues that round robin algorithm applied __a : int = time_slices # unfinished process is in this ready_queue __a : Optional[int] = queue # current time __a : Optional[Any] = current_time # finished process is in this sequence queue __a : deque[Process] = deque() def _lowerCamelCase ( self ): __a : List[Any] = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _lowerCamelCase ( self , _UpperCAmelCase ): __a : Optional[Any] = [] for i in range(len(_UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _lowerCamelCase ( self , _UpperCAmelCase ): __a : Dict = [] for i in range(len(_UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _lowerCamelCase ( self , _UpperCAmelCase ): __a : int = [] for i in range(len(_UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def _lowerCamelCase ( self , _UpperCAmelCase ): return [q.burst_time for q in queue] def _lowerCamelCase ( self , _UpperCAmelCase ): process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _lowerCamelCase ( self , _UpperCAmelCase ): __a : deque[Process] = deque() # sequence deque of finished process while len(_UpperCAmelCase ) != 0: __a : List[str] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __a : int = 0 # set the process's turnaround time because it is finished __a : str = self.current_time - cp.arrival_time # set the completion time __a : List[str] = self.current_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): __a : deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_UpperCAmelCase ) ): __a : str = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice 
self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __a : Tuple = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __a : Optional[Any] = 0 # set the finish time __a : Optional[int] = self.current_time # update the process' turnaround time because it is finished __a : Dict = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_UpperCAmelCase ) self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _lowerCamelCase ( self ): # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): __a , __a : List[str] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A = Process('''P1''', 0, 53) A = Process('''P2''', 0, 17) A = Process('''P3''', 0, 68) A = Process('''P4''', 0, 24) A = 3 A = [17, 25] A = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) A = Process('''P1''', 0, 53) A = Process('''P2''', 0, 17) A = Process('''P3''', 0, 68) A = Process('''P4''', 0, 24) A = 3 A = [17, 25] A = deque([Pa, Pa, Pa, Pa]) A = MLFQ(number_of_queues, time_slices, queue, 0) A = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( F'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( F'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
code_codestyle: 101
"""simple docstring""" import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowercase ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = OpenAIGPTTokenizer __lowerCAmelCase = OpenAIGPTTokenizerFast __lowerCAmelCase = True __lowerCAmelCase = False def _lowerCamelCase ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __a : List[str] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] __a : str = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __a : List[Any] = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def _lowerCamelCase ( self , _UpperCAmelCase ): return "lower newer", "lower newer" def _lowerCamelCase ( self ): __a : Tuple = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) __a : Tuple = '''lower''' __a : Union[str, Any] = ['''low''', '''er</w>'''] __a : str = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) __a : List[Any] = tokens + ['''<unk>'''] __a : Tuple = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) # Simple input __a : str = '''This is a simple input''' __a : Dict = ['''This is a simple input 1''', '''This is a simple input 2'''] __a : List[Any] = ('''This is a simple input''', '''This is a pair''') __a : Optional[Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) # Simple input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) # Simple input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) # Pair input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , 
padding='''max_length''' , ) def _lowerCamelCase ( self ): pass @require_ftfy @require_spacy @require_tokenizers class __lowercase ( _UpperCamelCase ): '''simple docstring''' pass
style_context_codestyle: 101
label: 1
"""simple docstring""" import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def UpperCAmelCase ( snake_case : Tuple , snake_case : int , snake_case : Dict , snake_case : Union[str, Any]=5 ): assert masked_input.count('''<mask>''' ) == 1 _lowerCAmelCase:Union[str, Any] = torch.tensor(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ).unsqueeze(0 ) # Batch size 1 _lowerCAmelCase:int = model(snake_case )[0] # The last hidden-state is the first element of the output tuple _lowerCAmelCase:Any = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() _lowerCAmelCase:Optional[int] = logits[0, masked_index, :] _lowerCAmelCase:List[str] = logits.softmax(dim=0 ) _lowerCAmelCase:List[str] = prob.topk(k=snake_case , dim=0 ) _lowerCAmelCase:str = """ """.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(snake_case ) )] ) _lowerCAmelCase:Optional[Any] = tokenizer.mask_token _lowerCAmelCase:str = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ): _lowerCAmelCase:Union[str, Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''' ) if " {0}".format(snake_case ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(''' {0}'''.format(snake_case ) , snake_case ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(snake_case , snake_case ), values[index].item(), predicted_token, ) ) return topk_filled_outputs UpperCamelCase__ = CamembertTokenizer.from_pretrained('''camembert-base''') UpperCamelCase__ = CamembertForMaskedLM.from_pretrained('''camembert-base''') model.eval() UpperCamelCase__ = '''Le camembert est <mask> :)''' print(fill_mask(masked_input, model, tokenizer, topk=3))
code_codestyle: 227
style_context:

```python
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class UpperCamelCase__(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, unittest.TestCase):
    """simple docstring"""

    __a: Optional[int] = StableUnCLIPPipeline
    __a: int = TEXT_TO_IMAGE_PARAMS
    __a: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    __a: Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
    __a: Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    __a: Tuple = False

    def snake_case__(self) -> Optional[Any]:
        """simple docstring"""
        lowercase_: int = 32
        lowercase_: Tuple = embedder_hidden_size
        # prior components
        torch.manual_seed(0)
        lowercase_: Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        lowercase_: Dict = CLIPTextModelWithProjection(CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=snake_case__, projection_dim=snake_case__, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000))
        torch.manual_seed(0)
        lowercase_: Tuple = PriorTransformer(num_attention_heads=2, attention_head_dim=12, embedding_dim=snake_case__, num_layers=1)
        torch.manual_seed(0)
        lowercase_: Optional[Any] = DDPMScheduler(variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=snake_case__, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2")
        # regular denoising components
        torch.manual_seed(0)
        lowercase_: Union[str, Any] = StableUnCLIPImageNormalizer(embedding_dim=snake_case__)
        lowercase_: str = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        lowercase_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        lowercase_: List[Any] = CLIPTextModel(CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=snake_case__, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000))
        torch.manual_seed(0)
        lowercase_: Optional[int] = UNetaDConditionModel(sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=snake_case__, layers_per_block=1, upcast_attention=snake_case__, use_linear_projection=snake_case__)
        torch.manual_seed(0)
        lowercase_: Dict = DDIMScheduler(beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=snake_case__, steps_offset=1)
        torch.manual_seed(0)
        lowercase_: str = AutoencoderKL()
        lowercase_: str = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def snake_case__(self, snake_case__, snake_case__=0) -> str:
        """simple docstring"""
        if str(snake_case__).startswith("mps"):
            lowercase_: Tuple = torch.manual_seed(snake_case__)
        else:
            lowercase_: int = torch.Generator(device=snake_case__).manual_seed(snake_case__)
        lowercase_: Dict = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def snake_case__(self) -> List[Any]:
        """simple docstring"""
        lowercase_: int = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case__)

    def snake_case__(self) -> Any:
        """simple docstring"""
        lowercase_: List[str] = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case__)


@slow
@require_torch_gpu
class UpperCamelCase__(unittest.TestCase):
    """simple docstring"""

    def snake_case__(self) -> Any:
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case__(self) -> List[str]:
        """simple docstring"""
        lowercase_: Union[str, Any] = load_numpy("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")
        lowercase_: List[Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.floataa)
        pipe.to(snake_case__)
        pipe.set_progress_bar_config(disable=snake_case__)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowercase_: Any = torch.Generator(device="cpu").manual_seed(0)
        lowercase_: Dict = pipe("anime turle", generator=snake_case__, output_type="np")
        lowercase_: str = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case__, snake_case__)

    def snake_case__(self) -> Dict:
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowercase_: str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.floataa)
        lowercase_: Optional[int] = pipe.to(snake_case__)
        pipe.set_progress_bar_config(disable=snake_case__)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowercase_: List[Any] = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")
        lowercase_: Optional[int] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
```
style_context_codestyle: 458
label: 0
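Note: the fill-mask sample in the row above returns (filled_sentence, probability, token) triples; a minimal, illustrative sketch of consuming them (this loop is not part of the dataset row):

for sentence, prob, token in fill_mask(masked_input, model, tokenizer, topk=3):
    # each candidate pairs a filled-in sentence with its softmax probability
    print(f"{prob:.3f}  {token!r}  ->  {sentence}")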
code:
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
code_codestyle: 708
style_context:
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
style_context_codestyle: 64
label: 0
"""simple docstring""" import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated a__ : Dict = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ a__ : List[Any] = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = numpy.dtype(numpy.uintaa ).newbyteorder(">" ) return numpy.frombuffer(bytestream.read(4 ) , dtype=lowerCAmelCase_ )[0] @deprecated(lowerCAmelCase_ , "Please use tf.data to implement this functionality." ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' print("Extracting" , f.name ) with gzip.GzipFile(fileobj=lowerCAmelCase_ ) as bytestream: __SCREAMING_SNAKE_CASE = _readaa(lowerCAmelCase_ ) if magic != 2051: raise ValueError( "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) ) __SCREAMING_SNAKE_CASE = _readaa(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = _readaa(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = _readaa(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = bytestream.read(rows * cols * num_images ) __SCREAMING_SNAKE_CASE = numpy.frombuffer(lowerCAmelCase_ , dtype=numpy.uinta ) __SCREAMING_SNAKE_CASE = data.reshape(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 1 ) return data @deprecated(lowerCAmelCase_ , "Please use tf.one_hot on tensors." ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = labels_dense.shape[0] __SCREAMING_SNAKE_CASE = numpy.arange(lowerCAmelCase_ ) * num_classes __SCREAMING_SNAKE_CASE = numpy.zeros((num_labels, num_classes) ) __SCREAMING_SNAKE_CASE = 1 return labels_one_hot @deprecated(lowerCAmelCase_ , "Please use tf.data to implement this functionality." ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=10 ): '''simple docstring''' print("Extracting" , f.name ) with gzip.GzipFile(fileobj=lowerCAmelCase_ ) as bytestream: __SCREAMING_SNAKE_CASE = _readaa(lowerCAmelCase_ ) if magic != 2049: raise ValueError( "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) ) __SCREAMING_SNAKE_CASE = _readaa(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = bytestream.read(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = numpy.frombuffer(lowerCAmelCase_ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(lowerCAmelCase_ , lowerCAmelCase_ ) return labels class UpperCamelCase_ : """simple docstring""" @deprecated( UpperCAmelCase__ , "Please use alternatives such as official/mnist/_DataSet.py" " from tensorflow/models." 
, ) def __init__( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : str=dtypes.floataa , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Dict=None , ) -> Dict: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = random_seed.get_seed(UpperCAmelCase__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __SCREAMING_SNAKE_CASE = dtypes.as_dtype(UpperCAmelCase__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype ) if fake_data: __SCREAMING_SNAKE_CASE = 1_0_0_0_0 __SCREAMING_SNAKE_CASE = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F"""images.shape: {images.shape} labels.shape: {labels.shape}""" __SCREAMING_SNAKE_CASE = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __SCREAMING_SNAKE_CASE = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __SCREAMING_SNAKE_CASE = images.astype(numpy.floataa ) __SCREAMING_SNAKE_CASE = numpy.multiply(UpperCAmelCase__ , 1.0 / 255.0 ) __SCREAMING_SNAKE_CASE = images __SCREAMING_SNAKE_CASE = labels __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 @property def UpperCAmelCase_ ( self : Dict ) -> Dict: return self._images @property def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]: return self._labels @property def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: return self._num_examples @property def UpperCAmelCase_ ( self : Any ) -> Tuple: return self._epochs_completed def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : List[str]=True ) -> int: if fake_data: __SCREAMING_SNAKE_CASE = [1] * 7_8_4 __SCREAMING_SNAKE_CASE = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(UpperCAmelCase__ )], [fake_label for _ in range(UpperCAmelCase__ )], ) __SCREAMING_SNAKE_CASE = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __SCREAMING_SNAKE_CASE = numpy.arange(self._num_examples ) numpy.random.shuffle(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.images[perma] __SCREAMING_SNAKE_CASE = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __SCREAMING_SNAKE_CASE = self._num_examples - start __SCREAMING_SNAKE_CASE = self._images[start : self._num_examples] __SCREAMING_SNAKE_CASE = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __SCREAMING_SNAKE_CASE = numpy.arange(self._num_examples ) numpy.random.shuffle(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.images[perm] __SCREAMING_SNAKE_CASE = self.labels[perm] # Start next epoch __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = batch_size - rest_num_examples __SCREAMING_SNAKE_CASE = self._index_in_epoch __SCREAMING_SNAKE_CASE = self._images[start:end] __SCREAMING_SNAKE_CASE = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __SCREAMING_SNAKE_CASE = 
self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(lowerCAmelCase_ , "Please write your own downloading logic." ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if not gfile.Exists(lowerCAmelCase_ ): gfile.MakeDirs(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) if not gfile.Exists(lowerCAmelCase_ ): urllib.request.urlretrieve(lowerCAmelCase_ , lowerCAmelCase_ ) # noqa: S310 with gfile.GFile(lowerCAmelCase_ ) as f: __SCREAMING_SNAKE_CASE = f.size() print("Successfully downloaded" , lowerCAmelCase_ , lowerCAmelCase_ , "bytes." ) return filepath @deprecated( lowerCAmelCase_ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=dtypes.floataa , lowerCAmelCase_=True , lowerCAmelCase_=5000 , lowerCAmelCase_=None , lowerCAmelCase_=DEFAULT_SOURCE_URL , ): '''simple docstring''' if fake_data: def fake(): return _DataSet( [] , [] , fake_data=lowerCAmelCase_ , one_hot=lowerCAmelCase_ , dtype=lowerCAmelCase_ , seed=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = fake() __SCREAMING_SNAKE_CASE = fake() __SCREAMING_SNAKE_CASE = fake() return _Datasets(train=lowerCAmelCase_ , validation=lowerCAmelCase_ , test=lowerCAmelCase_ ) if not source_url: # empty string check __SCREAMING_SNAKE_CASE = DEFAULT_SOURCE_URL __SCREAMING_SNAKE_CASE = "train-images-idx3-ubyte.gz" __SCREAMING_SNAKE_CASE = "train-labels-idx1-ubyte.gz" __SCREAMING_SNAKE_CASE = "t10k-images-idx3-ubyte.gz" __SCREAMING_SNAKE_CASE = "t10k-labels-idx1-ubyte.gz" __SCREAMING_SNAKE_CASE = _maybe_download( lowerCAmelCase_ , lowerCAmelCase_ , source_url + train_images_file ) with gfile.Open(lowerCAmelCase_ , "rb" ) as f: __SCREAMING_SNAKE_CASE = _extract_images(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = _maybe_download( lowerCAmelCase_ , lowerCAmelCase_ , source_url + train_labels_file ) with gfile.Open(lowerCAmelCase_ , "rb" ) as f: __SCREAMING_SNAKE_CASE = _extract_labels(lowerCAmelCase_ , one_hot=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = _maybe_download( lowerCAmelCase_ , lowerCAmelCase_ , source_url + test_images_file ) with gfile.Open(lowerCAmelCase_ , "rb" ) as f: __SCREAMING_SNAKE_CASE = _extract_images(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = _maybe_download( lowerCAmelCase_ , lowerCAmelCase_ , source_url + test_labels_file ) with gfile.Open(lowerCAmelCase_ , "rb" ) as f: __SCREAMING_SNAKE_CASE = _extract_labels(lowerCAmelCase_ , one_hot=lowerCAmelCase_ ) if not 0 <= validation_size <= len(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = ( "Validation size should be between 0 and " f"""{len(lowerCAmelCase_ )}. Received: {validation_size}.""" ) raise ValueError(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = train_images[:validation_size] __SCREAMING_SNAKE_CASE = train_labels[:validation_size] __SCREAMING_SNAKE_CASE = train_images[validation_size:] __SCREAMING_SNAKE_CASE = train_labels[validation_size:] __SCREAMING_SNAKE_CASE = {"dtype": dtype, "reshape": reshape, "seed": seed} __SCREAMING_SNAKE_CASE = _DataSet(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = _DataSet(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = _DataSet(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) return _Datasets(train=lowerCAmelCase_ , validation=lowerCAmelCase_ , test=lowerCAmelCase_ )
code_codestyle: 682
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : List[str] = logging.get_logger(__name__) a__ : str = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = "xlm-roberta" def __init__( self : int , UpperCAmelCase__ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : str=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int , ) -> Tuple: super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @property def UpperCAmelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"} else: __SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
style_context_codestyle: 682
label: 1
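Note: a minimal, hypothetical driver for the deprecated MNIST reader in the row above; "/tmp/mnist" is a placeholder cache directory, not something the sample specifies.

# Downloads the four MNIST archives on first use, then yields mini-batches.
datasets_mnist = read_data_sets("/tmp/mnist", one_hot=True, validation_size=5000)
images, labels = datasets_mnist.train.next_batch(32)
print(images.shape, labels.shape)  # (32, 784) (32, 10) with reshape=True and one_hot=True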
code:
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
code_codestyle: 712
style_context:
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 571
label: 0
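Note: an illustrative call of the Casimir-force sample above; the plate area and separation are made-up values, and the printed number is not computed here.

# Exactly one argument is 0 -- that is the unknown being solved for.
print(casimir_force(force=0, area=4.0, distance=0.03))  # -> {'force': ...}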
code:
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
code_codestyle: 276
style_context:
def bead_sort(sequence: list) -> list:
    # bead sort only works for sequences of non-negative integers
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
style_context_codestyle: 276
label: 1
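Note: a quick illustrative use of the KMP sample in the row above (inputs made up, outputs traced by hand):

# The failure array stores, per prefix, the length of the longest proper
# prefix that is also a suffix; kmp() uses it to avoid re-comparing text.
print(get_failure_array("ababaca"))  # [0, 0, 1, 2, 3, 0, 1]
print(kmp("aba", "cabad"))  # True: "aba" occurs in "cabad"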
code:
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 717
style_context:
def topological_sort(graph):
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
style_context_codestyle: 62
label: 0
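Note: the Kahn's-algorithm sample above prints [0, 1, 2, 3, 4, 5] for the bundled graph; a made-up cyclic input exercises the other branch:

# A two-node cycle leaves every indegree nonzero, so cnt < len(graph).
topological_sort({0: [1], 1: [0]})  # prints "Cycle exists"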
code:
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput


if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm_attrs(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
code_codestyle: 537
style_context:
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
537
1
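The style_context in the row above implements rod cutting three ways: naive recursion, memoized top-down, and bottom-up dynamic programming. For reference, a minimal de-obfuscated sketch of the bottom-up version; the names are my own, not from the sample:

def bottom_up_cut_rod(prices: list[int], n: int) -> int:
    # max_rev[i] holds the best revenue obtainable from a rod of length i
    max_rev = [0] * (n + 1)
    for length in range(1, n + 1):
        best = 0
        for cut in range(1, length + 1):
            best = max(best, prices[cut - 1] + max_rev[length - cut])
        max_rev[length] = best
    return max_rev[n]

# matches the sample's own check: six unit pieces at price 6 give 36
assert bottom_up_cut_rod([6, 10, 12, 15, 20, 23], 6) == 36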
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: """simple docstring""" __lowerCAmelCase : List[Any] = """\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n """ __lowerCAmelCase : Optional[Any] = """\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n """ __lowerCAmelCase : Optional[int] = """\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n """ # Force fetching the files so that we can use the cache __lowerCAmelCase : Optional[int] = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(lowerCAmelCase ) BertModel.from_pretrained(lowerCAmelCase ) BertTokenizer.from_pretrained(lowerCAmelCase ) pipeline(task="""fill-mask""" , model=lowerCAmelCase ) # baseline - just load from_pretrained with normal network __lowerCAmelCase : Tuple = [sys.executable, """-c""", """\n""".join([load, run, mock] )] # should succeed __lowerCAmelCase : List[str] = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __lowerCAmelCase : Tuple = """1""" __lowerCAmelCase : List[str] = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: """simple docstring""" __lowerCAmelCase : Optional[int] = """\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n """ __lowerCAmelCase : Optional[Any] = """\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n """ __lowerCAmelCase : Tuple = """\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n """ # Force fetching the files so that we can use the cache __lowerCAmelCase : Dict = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(lowerCAmelCase ) BertModel.from_pretrained(lowerCAmelCase ) BertTokenizer.from_pretrained(lowerCAmelCase ) pipeline(task="""fill-mask""" , model=lowerCAmelCase ) # baseline - just load from_pretrained with normal network __lowerCAmelCase : Union[str, Any] = [sys.executable, """-c""", """\n""".join([load, run, mock] )] # should succeed __lowerCAmelCase : Union[str, Any] = self.get_env() __lowerCAmelCase : int = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Optional[int] = """\nfrom transformers import BertConfig, BertModel, BertTokenizer\n """ __lowerCAmelCase : Tuple = """\nmname = 
\"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n """ __lowerCAmelCase : Tuple = """\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n """ # baseline - just load from_pretrained with normal network __lowerCAmelCase : Union[str, Any] = [sys.executable, """-c""", """\n""".join([load, run] )] # should succeed __lowerCAmelCase : List[Any] = self.get_env() __lowerCAmelCase : Dict = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # next emulate no network __lowerCAmelCase : Tuple = [sys.executable, """-c""", """\n""".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __lowerCAmelCase : Any = """1""" __lowerCAmelCase : str = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowerCAmelCase : List[str] = """\nfrom transformers import pipeline\n """ __lowerCAmelCase : List[str] = """\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n """ __lowerCAmelCase : List[str] = """\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n """ __lowerCAmelCase : List[str] = self.get_env() __lowerCAmelCase : Any = """1""" __lowerCAmelCase : str = [sys.executable, """-c""", """\n""".join([load, mock, run] )] __lowerCAmelCase : List[str] = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( """You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , ) @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowerCAmelCase : List[str] = """\nfrom transformers import AutoModel\n """ __lowerCAmelCase : int = """\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n """ # baseline - just load from_pretrained with normal network __lowerCAmelCase : Optional[int] = [sys.executable, """-c""", """\n""".join([load, run] )] # should succeed __lowerCAmelCase : Any = self.get_env() __lowerCAmelCase : Optional[Any] = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __lowerCAmelCase : Union[str, Any] = """1""" __lowerCAmelCase : Union[str, Any] = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) 
self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() )
716
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case_ (__A : str , __A : str , __A : str , __A : PreTrainedTokenizer , __A : int , __A : Optional[int] = None , ) -> Tuple: __lowerCAmelCase : int = {} if train_file is not None: __lowerCAmelCase : Optional[Any] = [train_file] if eval_file is not None: __lowerCAmelCase : Dict = [eval_file] if test_file is not None: __lowerCAmelCase : Tuple = [test_file] __lowerCAmelCase : Dict = datasets.load_dataset("""csv""" , data_files=__A ) __lowerCAmelCase : Optional[Any] = list(ds[list(files.keys() )[0]].features.keys() ) __lowerCAmelCase : Optional[Any] = features_name.pop(__A ) __lowerCAmelCase : int = list(set(ds[list(files.keys() )[0]][label_name] ) ) __lowerCAmelCase : Optional[Any] = {label: i for i, label in enumerate(__A )} __lowerCAmelCase : Union[str, Any] = tokenizer.model_input_names __lowerCAmelCase : List[Any] = {} if len(__A ) == 1: for k in files.keys(): __lowerCAmelCase : Tuple = ds[k].map( lambda __A : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__A , max_length=__A , padding="""max_length""" ) , batched=__A , ) elif len(__A ) == 2: for k in files.keys(): __lowerCAmelCase : Optional[int] = ds[k].map( lambda __A : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__A , max_length=__A , padding="""max_length""" , ) , batched=__A , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __lowerCAmelCase : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names} __lowerCAmelCase : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __lowerCAmelCase : Optional[Any] = {k: v for k, v in ex.items() if k in input_names} __lowerCAmelCase : List[str] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __lowerCAmelCase : Optional[Any] = {k: v for k, v in ex.items() if k in input_names} __lowerCAmelCase : str = labelaid[ex[label_name]] yield (d, label) __lowerCAmelCase : Dict = ( tf.data.Dataset.from_generator( __A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __lowerCAmelCase : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __lowerCAmelCase : Dict = ( tf.data.Dataset.from_generator( __A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __lowerCAmelCase : List[str] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __lowerCAmelCase : Optional[Any] = ( tf.data.Dataset.from_generator( __A , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not 
None: __lowerCAmelCase : Any = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid __UpperCAmelCase = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE : """simple docstring""" lowerCamelCase : int =field(metadata={"help": "Which column contains the label"} ) lowerCamelCase : str =field(default=a_ , metadata={"help": "The path of the training file"} ) lowerCamelCase : Optional[str] =field(default=a_ , metadata={"help": "The path of the development file"} ) lowerCamelCase : Optional[str] =field(default=a_ , metadata={"help": "The path of the test file"} ) lowerCamelCase : int =field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCamelCase : bool =field( default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) @dataclass class SCREAMING_SNAKE_CASE : """simple docstring""" lowerCamelCase : str =field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) lowerCamelCase : Optional[str] =field( default=a_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowerCamelCase : Optional[str] =field( default=a_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) lowerCamelCase : bool =field(default=a_ , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowerCamelCase : Optional[str] =field( default=a_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) def snake_case_ () -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : int = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__A , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __lowerCAmelCase : int = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__A ) , labelaid=__A , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __lowerCAmelCase : Tuple = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , ) def compute_metrics(__A : EvalPrediction ) -> Dict: __lowerCAmelCase : str = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __lowerCAmelCase : Tuple = TFTrainer( model=__A , args=__A , train_dataset=__A , eval_dataset=__A , compute_metrics=__A , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowerCAmelCase : Dict = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowerCAmelCase : List[str] = trainer.evaluate() __lowerCAmelCase : Any = os.path.join(training_args.output_dir , """eval_results.txt""" ) with open(__A , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__A ) return results if __name__ == "__main__": main()
218
0
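The code sample in the row above tests Transformers' offline mode by launching child interpreters with socket access stubbed out and TRANSFORMERS_OFFLINE=1 set. A stripped-down, stand-alone sketch of that pattern (not the test class itself, which also pre-populates the model cache):

import os
import subprocess
import sys

# the child disables real sockets, then signals success on stdout
child_program = "\n".join([
    "import socket",
    "def offline_socket(*args, **kwargs):",
    "    raise RuntimeError('Offline mode is enabled')",
    "socket.socket = offline_socket",
    "print('success')",
])
env = {**os.environ, "TRANSFORMERS_OFFLINE": "1"}
result = subprocess.run([sys.executable, "-c", child_program], env=env, capture_output=True)
assert result.returncode == 0 and b"success" in result.stdout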
'''simple docstring''' from collections import namedtuple lowercase : Optional[int] = namedtuple('from_to', 'from_ to') lowercase : str = { 'cubicmeter': from_to(1, 1), 'litre': from_to(0.0_01, 10_00), 'kilolitre': from_to(1, 1), 'gallon': from_to(0.0_04_54, 2_64.1_72), 'cubicyard': from_to(0.7_64_55, 1.3_07_95), 'cubicfoot': from_to(0.0_28, 35.31_47), 'cup': from_to(0.0_00_23_65_88, 42_26.75), } def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if from_type not in METRIC_CONVERSION: raise ValueError( F'Invalid \'from_type\' value: {from_type!r} Supported values are:\n' + ''', '''.join(snake_case__ ) ) if to_type not in METRIC_CONVERSION: raise ValueError( F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n' + ''', '''.join(snake_case__ ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
634
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'spiece.model'} lowercase : Tuple = { 'vocab_file': { 'bert_for_seq_generation': ( 'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model' ), } } lowercase : Tuple = {'bert_for_seq_generation': 5_12} class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = [] __magic_name__ = ['''input_ids''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<::::>" , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" A : str = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) A : List[Any] = vocab_file A : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE ) @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self.sp_model.get_piece_size() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Any = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Tuple: """simple docstring""" A : Dict = self.__dict__.copy() A : int = None return state def __setstate__( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A : Optional[int] = {} A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Tuple = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE ) return token def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[Any] = [] A : Dict = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token A : str = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE ) out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) return out_string.strip() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A : int = os.path.join( 
SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , '''wb''' ) as fi: A : str = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
634
1
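The volume-conversion sample above pivots every unit through the cubic metre, storing both conversion factors in a namedtuple. A cleaned-up sketch with a reduced table (the factors are copied from the sample; the function name is mine):

from collections import namedtuple

FromTo = namedtuple("FromTo", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": FromTo(1, 1),
    "litre": FromTo(0.001, 1000),
    "gallon": FromTo(0.00454, 264.172),
}

def convert_volume(value: float, from_type: str, to_type: str) -> float:
    # normalise to cubic metres with `from_`, then scale into the target unit with `to`
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to

assert convert_volume(4, "cubicmeter", "litre") == 4000.0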
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class a__ ( A__ ): def __init__( self :Any , *_lowerCamelCase :Union[str, Any] , **_lowerCamelCase :int ): '''simple docstring''' warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , _lowerCamelCase , ) super().__init__(*_lowerCamelCase , **_lowerCamelCase )
721
"""simple docstring""" import re from filelock import FileLock try: import nltk __SCREAMING_SNAKE_CASE = True except (ImportError, ModuleNotFoundError): __SCREAMING_SNAKE_CASE = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def A_ ( __lowercase ): re.sub('<n>' , '' , __lowercase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__lowercase ) )
395
0
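The style_context above strips Pegasus's '<n>' marker and re-splits text into one sentence per line via nltk. Note that the sample calls re.sub without using its return value (re.sub returns a new string rather than mutating its input), so the marker is never actually removed. A dependency-free sketch of the intended behaviour, with a naive regex standing in for nltk.sent_tokenize:

import re

def split_into_sentences(text: str) -> str:
    # keep the result of re.sub, unlike the sample above
    text = re.sub(r"<n>", "", text)
    # naive stand-in for nltk.sent_tokenize: split after ., ! or ?
    sentences = re.split(r"(?<=[.!?])\s+", text.strip())
    return "\n".join(s for s in sentences if s)

assert split_into_sentences("One. Two!<n> Three?") == "One.\nTwo!\nThree?"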
"""simple docstring""" from math import pi def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return 2 * pi * radius * (angle / 3_60) if __name__ == "__main__": print(arc_length(9_0, 1_0))
277
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase: Optional[int] = logging.get_logger(__name__) lowerCAmelCase: int = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class a__( lowerCamelCase__ ): lowercase__ = """ctrl""" lowercase__ = ["""past_key_values"""] lowercase__ = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Optional[int] , __snake_case : Union[str, Any]=24_65_34 , __snake_case : Dict=2_56 , __snake_case : Optional[int]=12_80 , __snake_case : Optional[int]=81_92 , __snake_case : Union[str, Any]=48 , __snake_case : str=16 , __snake_case : Any=0.1 , __snake_case : List[Any]=0.1 , __snake_case : str=1e-6 , __snake_case : List[str]=0.02 , __snake_case : int=True , **__snake_case : Any , ): a : int = vocab_size a : Union[str, Any] = n_positions a : Optional[int] = n_embd a : Dict = n_layer a : Union[str, Any] = n_head a : Tuple = dff a : Union[str, Any] = resid_pdrop a : Tuple = embd_pdrop a : int = layer_norm_epsilon a : str = initializer_range a : Any = use_cache super().__init__(**__snake_case )
526
0
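The arc-length sample in the row above is a one-liner: an arc subtending a given angle is that fraction of the full circumference. The same formula with readable names:

from math import pi

def arc_length(angle_degrees: float, radius: float) -> float:
    # circumference (2*pi*r) scaled by the swept fraction of a full turn
    return 2 * pi * radius * (angle_degrees / 360)

print(arc_length(90, 10))  # 5*pi ~= 15.708, a quarter circle of radius 10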
from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig A : str = logging.get_logger(__name__) A : List[str] = '''T5Config''' class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = '''mt5''' __lowerCamelCase : str = MTaConfig class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : str = '''mt5''' __lowerCamelCase : Optional[int] = MTaConfig class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[Any] = '''mt5''' __lowerCamelCase : List[Any] = MTaConfig
247
def __lowerCamelCase ( __a :int ) -> list[int]: """simple docstring""" if num <= 0: raise ValueError("""Input must be a positive integer""" ) A__ = [True] * (num + 1) A__ = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , __a ): A__ = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() A : Any = int(input('''Enter a positive integer: ''').strip()) print(prime_sieve_eratosthenes(user_num))
247
1
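The style_context above is a Sieve of Eratosthenes (after the renaming, its __main__ block calls names that no longer exist, so the sample itself would not run). A self-contained version of the same sieve:

def prime_sieve(limit: int) -> list[int]:
    if limit <= 0:
        raise ValueError("Input must be a positive integer")
    is_prime = [True] * (limit + 1)
    p = 2
    while p * p <= limit:
        if is_prime[p]:
            # p is prime, so strike out every multiple starting at p*p
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
        p += 1
    return [n for n in range(2, limit + 1) if is_prime[n]]

assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]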
def snake_case ( snake_case__ :List[Any]) -> Tuple: # noqa: E741 _A = len(snake_case__) _A = 0 _A = [0] * n _A = [False] * n _A = [False] * n def dfs(snake_case__ :Optional[Any] , snake_case__ :Union[str, Any] , snake_case__ :List[str] , snake_case__ :str): if parent == root: out_edge_count += 1 _A = True _A = at for to in l[at]: if to == parent: pass elif not visited[to]: _A = dfs(snake_case__ , snake_case__ , snake_case__ , snake_case__) _A = min(low[at] , low[to]) # AP found via bridge if at < low[to]: _A = True # AP found via cycle if at == low[to]: _A = True else: _A = min(low[at] , snake_case__) return out_edge_count for i in range(snake_case__): if not visited[i]: _A = 0 _A = dfs(snake_case__ , snake_case__ , -1 , snake_case__) _A = out_edge_count > 1 for x in range(len(snake_case__)): if is_art[x] is True: print(snake_case__) # Adjacency list of graph _SCREAMING_SNAKE_CASE = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
401
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class a : """simple docstring""" lowerCamelCase :str lowerCamelCase :str = None @staticmethod def UpperCAmelCase ( ) -> List[str]: raise NotImplementedError def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict: raise NotImplementedError def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: raise NotImplementedError def UpperCAmelCase ( self ) -> List[str]: if not self.is_available(): raise RuntimeError( F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def UpperCAmelCase ( cls ) -> List[str]: return F'''`pip install {cls.pip_package or cls.name}`''' class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Any = '''optuna''' @staticmethod def UpperCAmelCase ( ) -> Tuple: return is_optuna_available() def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: return run_hp_search_optuna(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: return default_hp_space_optuna(lowerCAmelCase_ ) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Tuple = '''ray''' lowerCamelCase :int = '''\'ray[tune]\'''' @staticmethod def UpperCAmelCase ( ) -> Dict: return is_ray_available() def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> int: return run_hp_search_ray(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: return default_hp_space_ray(lowerCAmelCase_ ) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Any = '''sigopt''' @staticmethod def UpperCAmelCase ( ) -> Optional[int]: return is_sigopt_available() def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Union[str, Any]: return run_hp_search_sigopt(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return default_hp_space_sigopt(lowerCAmelCase_ ) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Any = '''wandb''' @staticmethod def UpperCAmelCase ( ) -> int: return is_wandb_available() def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]: return run_hp_search_wandb(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: return default_hp_space_wandb(lowerCAmelCase_ ) _SCREAMING_SNAKE_CASE = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def snake_case ( ) -> str: _A = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(snake_case__) > 0: _A = available_backends[0].name if len(snake_case__) > 1: logger.info( F'''{len(snake_case__)} hyperparameter search backends available. 
Using {name} as the default.''') return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( F''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
401
1
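The code sample above finds articulation points (cut vertices) with a low-link DFS. A recursive sketch of the same idea with my own names, run against the sample's adjacency list; removing vertex 2, 3 or 5 disconnects that graph:

def articulation_points(graph: dict[int, list[int]]) -> list[int]:
    n = len(graph)
    visited, is_art = [False] * n, [False] * n
    disc = [0] * n   # discovery time of each vertex
    low = [0] * n    # lowest discovery time reachable from the subtree
    timer = 0

    def dfs(at: int, parent: int) -> None:
        nonlocal timer
        visited[at] = True
        disc[at] = low[at] = timer
        timer += 1
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], disc[to])  # back edge
            else:
                children += 1
                dfs(to, at)
                low[at] = min(low[at], low[to])
                # non-root vertex: cut if no back edge climbs above `at`
                if parent != -1 and low[to] >= disc[at]:
                    is_art[at] = True
        # root vertex: cut iff it has two or more DFS children
        if parent == -1 and children > 1:
            is_art[at] = True

    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return [v for v in range(n) if is_art[v]]

graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
         5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
assert articulation_points(graph) == [2, 3, 5]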
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging A__ : str = logging.get_logger(__name__) class lowercase__ ( __lowercase ): '''simple docstring''' _UpperCAmelCase :Tuple = ['pixel_values'] def __init__( self : Dict , snake_case__ : str = True , snake_case__ : List[str] = 32 , snake_case__ : Dict=PILImageResampling.BILINEAR , snake_case__ : str = True , **snake_case__ : Tuple , ): lowerCamelCase_ : Union[str, Any] =do_resize lowerCamelCase_ : Tuple =do_rescale lowerCamelCase_ : Optional[Any] =size_divisor lowerCamelCase_ : Optional[int] =resample super().__init__(**__A ) def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Tuple = None , **snake_case__ : Dict ): lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =get_image_size(__A ) # Rounds the height and width down to the closest multiple of size_divisor lowerCamelCase_ : Dict =height // size_divisor * size_divisor lowerCamelCase_ : Any =width // size_divisor * size_divisor lowerCamelCase_ : List[Any] =resize(__A , (new_h, new_w) , resample=__A , data_format=__A , **__A ) return image def UpperCAmelCase__ ( self : int , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[int] = None , **snake_case__ : Optional[Any] ): return rescale(image=__A , scale=__A , data_format=__A , **__A ) def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Any , snake_case__ : Union[str, Any] = None , snake_case__ : Optional[int] = None , snake_case__ : Any=None , snake_case__ : str = None , snake_case__ : List[str] = None , snake_case__ : int = ChannelDimension.FIRST , **snake_case__ : Optional[int] , ): lowerCamelCase_ : List[Any] =do_resize if do_resize is not None else self.do_resize lowerCamelCase_ : List[str] =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ : Dict =size_divisor if size_divisor is not None else self.size_divisor lowerCamelCase_ : List[str] =resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError("size_divisor is required for resizing" ) lowerCamelCase_ : str =make_list_of_images(__A ) if not valid_images(__A ): raise ValueError("Invalid image(s)" ) # All transformations expect numpy arrays. lowerCamelCase_ : Tuple =[to_numpy_array(__A ) for img in images] if do_resize: lowerCamelCase_ : List[str] =[self.resize(__A , size_divisor=__A , resample=__A ) for image in images] if do_rescale: lowerCamelCase_ : Optional[int] =[self.rescale(__A , scale=1 / 255 ) for image in images] lowerCamelCase_ : List[Any] =[to_channel_dimension_format(__A , __A ) for image in images] lowerCamelCase_ : Tuple ={"pixel_values": images} return BatchFeature(data=__A , tensor_type=__A )
714
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig A__ : Optional[Any] = logging.get_logger(__name__) A__ : Tuple = 'T5Config' def _snake_case ( lowerCamelCase__ : jnp.array , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> jnp.ndarray: lowerCamelCase_ : Optional[Any] =jnp.zeros_like(lowerCamelCase__ ) lowerCamelCase_ : Dict =shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) lowerCamelCase_ : Union[str, Any] =shifted_input_ids.at[:, 0].set(lowerCamelCase__ ) lowerCamelCase_ : Optional[int] =jnp.where(shifted_input_ids == -100 , lowerCamelCase__ , lowerCamelCase__ ) return shifted_input_ids class lowercase__ ( snake_case__ ): _UpperCAmelCase :Dict = "mt5" _UpperCAmelCase :Tuple = MTaConfig class lowercase__ ( snake_case__ ): _UpperCAmelCase :Any = "mt5" _UpperCAmelCase :Optional[int] = MTaConfig class lowercase__ ( snake_case__ ): _UpperCAmelCase :Any = "mt5" _UpperCAmelCase :Tuple = MTaConfig
244
0
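The Flax MT5 sample above defines a shift_tokens_right helper that turns labels into decoder inputs. The same logic as a plain-numpy sketch (the jnp version uses .at[...].set(...) because JAX arrays are immutable):

import numpy as np

def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    # shift everything one position to the right and prepend the start token
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # loss-masked positions (-100) must become real padding in the decoder input
    return np.where(shifted == -100, pad_token_id, shifted)

labels = np.array([[42, 17, -100]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0))  # [[ 0 42 17]]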
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , ) -> List[str]: __lowerCamelCase : Optional[int] = parent __lowerCamelCase : str = batch_size __lowerCamelCase : List[Any] = image_size __lowerCamelCase : Any = patch_size __lowerCamelCase : Tuple = num_channels __lowerCamelCase : Dict = is_training __lowerCamelCase : List[str] = use_labels __lowerCamelCase : Optional[Any] = hidden_size __lowerCamelCase : int = num_hidden_layers __lowerCamelCase : int = num_attention_heads __lowerCamelCase : str = intermediate_size __lowerCamelCase : Optional[int] = hidden_act __lowerCamelCase : List[str] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : Optional[Any] = type_sequence_label_size __lowerCamelCase : List[str] = initializer_range __lowerCamelCase : Union[str, Any] = scope __lowerCamelCase : Union[str, Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCamelCase : List[Any] = (image_size // patch_size) ** 2 __lowerCamelCase : Optional[int] = num_patches + 2 def lowercase_ ( self ) -> Tuple: __lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : List[Any] = None if self.use_labels: __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : Dict = self.get_config() return config, pixel_values, labels def lowercase_ ( self ) -> Tuple: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: 
__lowerCamelCase : Union[str, Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __lowerCamelCase : List[str] = 1 __lowerCamelCase : List[Any] = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Dict = self.type_sequence_label_size __lowerCamelCase : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase : Tuple = 1 __lowerCamelCase : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase_ ( self ) -> List[Any]: __lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = config_and_inputs __lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Any = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowerCamelCase : List[str] = ( { 'feature-extraction': TFDeiTModel, 'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowerCamelCase : Optional[int] = False lowerCamelCase : List[Any] = False lowerCamelCase : List[str] = False lowerCamelCase : Any = False def lowercase_ ( self ) -> str: __lowerCamelCase : Any = TFDeiTModelTester(self ) __lowerCamelCase : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def lowercase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def lowercase_ ( self ) -> Union[str, Any]: pass def lowercase_ ( self ) -> int: __lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Any = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __lowerCamelCase : 
Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Dense ) ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : Tuple = [*signature.parameters.keys()] __lowerCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Dict: __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Dict: __lowerCamelCase : Optional[int] = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowercase_ ( self ) -> Dict: for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Tuple = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase__ ( ) -> Optional[int]: __lowerCamelCase : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> str: return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ) __lowerCamelCase : Union[str, Any] = self.default_image_processor __lowerCamelCase : Dict = prepare_img() __lowerCamelCase : Dict = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='tf' ) # forward pass __lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits __lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
13
def __magic_name__ ( lowercase = 100 ) -> int: """simple docstring""" lowercase_ : Dict = (n * (n + 1) // 2) ** 2 lowercase_ : List[str] = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F'''{solution() = }''')
458
0
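The style_context above is Project Euler 6 in closed form. The sample calls the first quantity "sum_cubes", which is also correct: by Nicomachus's identity, 1**3 + ... + n**3 == (n*(n+1)//2)**2, the square of the sum. A readable version:

def sum_square_difference(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2          # (1 + 2 + ... + n)**2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6  # 1**2 + 2**2 + ... + n**2
    return square_of_sum - sum_of_squares

assert sum_square_difference(10) == 2640  # 55**2 - 385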
"""simple docstring""" import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy _UpperCamelCase : Tuple = logging.get_logger(__name__) _UpperCamelCase : Union[str, Any] = { 'artists_file': 'artists.json', 'lyrics_file': 'lyrics.json', 'genres_file': 'genres.json', } _UpperCamelCase : List[Any] = { 'artists_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json', }, 'genres_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json', }, 'lyrics_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json', }, } _UpperCamelCase : Optional[int] = { 'jukebox': 5_1_2, } class a ( a_ ): UpperCAmelCase_ : Dict =VOCAB_FILES_NAMES UpperCAmelCase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Optional[Any] =PRETRAINED_LYRIC_TOKENS_SIZES UpperCAmelCase_ : Optional[int] =["input_ids", "attention_mask"] def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=["v3", "v2", "v2"] , _lowerCamelCase=5_1_2 , _lowerCamelCase=5 , _lowerCamelCase="<|endoftext|>" , **_lowerCamelCase , ): lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else unk_token super().__init__( unk_token=_lowerCamelCase , n_genres=_lowerCamelCase , version=_lowerCamelCase , max_n_lyric_tokens=_lowerCamelCase , **_lowerCamelCase , ) lowercase = version lowercase = max_n_lyric_tokens lowercase = n_genres with open(_lowerCamelCase , encoding='utf-8' ) as vocab_handle: lowercase = json.load(_lowerCamelCase ) with open(_lowerCamelCase , encoding='utf-8' ) as vocab_handle: lowercase = json.load(_lowerCamelCase ) with open(_lowerCamelCase , encoding='utf-8' ) as vocab_handle: lowercase = json.load(_lowerCamelCase ) lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 7_9: lowercase = oov.replace(R'\-\'' , R'\-+\'' ) lowercase = regex.compile(_lowerCamelCase ) lowercase = {v: k for k, v in self.artists_encoder.items()} lowercase = {v: k for k, v in self.genres_encoder.items()} lowercase = {v: k for k, v in self.lyrics_encoder.items()} @property def UpperCamelCase_ ( self ): return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def UpperCamelCase_ ( self ): return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowercase = [self.artists_encoder.get(_lowerCamelCase , 0 ) for artist in list_artists] for genres in range(len(_lowerCamelCase ) ): lowercase = [self.genres_encoder.get(_lowerCamelCase , 0 ) for genre in list_genres[genres]] lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) lowercase = [[self.lyrics_encoder.get(_lowerCamelCase , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def UpperCamelCase_ ( self , _lowerCamelCase ): return list(_lowerCamelCase ) def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ): lowercase , lowercase , lowercase = self.prepare_for_tokenization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowercase = self._tokenize(_lowerCamelCase ) return artist, genre, lyrics def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ): for idx in range(len(self.version ) ): if self.version[idx] == "v3": lowercase = artists[idx].lower() lowercase = [genres[idx].lower()] else: lowercase = self._normalize(artists[idx] ) + '.v2' lowercase = [ self._normalize(_lowerCamelCase ) + '.v2' for genre in genres[idx].split('_' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' ) lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n' lowercase = {vocab[index]: index + 1 for index in range(len(_lowerCamelCase ) )} lowercase = 0 lowercase = len(_lowerCamelCase ) + 1 lowercase = self.vocab lowercase = {v: k for k, v in self.vocab.items()} lowercase = '' else: lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' ) lowercase = self._run_strip_accents(_lowerCamelCase ) lowercase = lyrics.replace('\\' , '\n' ) lowercase = self.out_of_vocab.sub('' , _lowerCamelCase ), [], [] return artists, genres, lyrics def UpperCamelCase_ ( self , _lowerCamelCase ): lowercase = unicodedata.normalize('NFD' , _lowerCamelCase ) lowercase = [] for char in text: lowercase = unicodedata.category(_lowerCamelCase ) if cat == "Mn": continue output.append(_lowerCamelCase ) return "".join(_lowerCamelCase ) def UpperCamelCase_ ( self , _lowerCamelCase ): lowercase = ( [chr(_lowerCamelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )] + [chr(_lowerCamelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )] + [chr(_lowerCamelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )] + ['.'] ) lowercase = frozenset(_lowerCamelCase ) lowercase = re.compile(R'_+' ) lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] ) lowercase = pattern.sub('_' , _lowerCamelCase ).strip('_' ) return text def UpperCamelCase_ ( self , _lowerCamelCase ): return " ".join(_lowerCamelCase ) def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): # 
Convert to TensorType if not isinstance(_lowerCamelCase , _lowerCamelCase ): lowercase = TensorType(_lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( 'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' ) import tensorflow as tf lowercase = tf.constant lowercase = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' ) import torch lowercase = torch.tensor lowercase = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' ) import jax.numpy as jnp # noqa: F811 lowercase = jnp.array lowercase = _is_jax else: lowercase = np.asarray lowercase = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: lowercase = [inputs] if not is_tensor(_lowerCamelCase ): lowercase = as_tensor(_lowerCamelCase ) except: # noqa E722 raise ValueError( 'Unable to create tensor, you should probably activate truncation and/or padding ' 'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' ) return inputs def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase="pt" ): lowercase = [0, 0, 0] lowercase = [artist] * len(self.version ) lowercase = [genres] * len(self.version ) lowercase , lowercase , lowercase = self.tokenize(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowercase , lowercase , lowercase = self._convert_token_to_id(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowercase = [-INFINITY] * len(full_tokens[-1] ) lowercase = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} ) def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ): if not os.path.isdir(_lowerCamelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowercase = os.path.join( _lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] ) with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=_lowerCamelCase ) ) lowercase = os.path.join( _lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] ) with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=_lowerCamelCase ) ) lowercase = os.path.join( _lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] ) with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowercase = self.artists_decoder.get(_lowerCamelCase ) lowercase = [self.genres_decoder.get(_lowerCamelCase ) for genre in genres_index] lowercase = [self.lyrics_decoder.get(_lowerCamelCase ) for character in lyric_index] return artist, genres, lyrics
134
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def _SCREAMING_SNAKE_CASE ( __snake_case : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowercase = 2 while True: if is_prime(__snake_case ): yield num num += 1 def _SCREAMING_SNAKE_CASE ( __snake_case : int = 2_00_00_00 ): '''simple docstring''' return sum(takewhile(lambda __snake_case : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
134
1
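The style_context above (Project Euler 10) pairs a 6k +/- 1 trial-division primality test with an infinite generator consumed through itertools.takewhile. A compact sketch of both pieces, with names of my own choosing:

import math
from itertools import count, takewhile

def is_prime(number: int) -> bool:
    if number in (2, 3):
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # every prime above 3 has the form 6k +/- 1
    for i in range(5, math.isqrt(number) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def sum_primes_below(n: int) -> int:
    primes = (k for k in count(2) if is_prime(k))
    return sum(takewhile(lambda p: p < n, primes))

assert sum_primes_below(10) == 17  # 2 + 3 + 5 + 7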
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__) lowerCAmelCase_ : str = { 'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='timesformer' def __init__( self : Optional[int] , __a : Optional[int]=2_24 , __a : Tuple=16 , __a : int=3 , __a : Union[str, Any]=8 , __a : Union[str, Any]=7_68 , __a : List[str]=12 , __a : Union[str, Any]=12 , __a : Optional[Any]=30_72 , __a : Tuple="gelu" , __a : str=0.0 , __a : List[Any]=0.0 , __a : Any=0.02 , __a : List[str]=1e-6 , __a : Any=True , __a : Union[str, Any]="divided_space_time" , __a : str=0 , **__a : Tuple , ): super().__init__(**__a ) _a = image_size _a = patch_size _a = num_channels _a = num_frames _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = initializer_range _a = layer_norm_eps _a = qkv_bias _a = attention_type _a = drop_path_rate
692
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =42 __a =42 class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Union[str, Any] , __a : int ): _a = [[] for _ in range(__a )] _a = size def __getitem__( self : int , __a : int ): return iter(self._graph[vertex] ) @property def UpperCamelCase__ ( self : Dict ): return self._size def UpperCamelCase__ ( self : Union[str, Any] , __a : int , __a : int , __a : int ): if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1." ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size)." ) self._graph[from_vertex].append(Edge(__a , __a ) ) def UpperCamelCase__ ( self : Tuple , __a : int , __a : int ): _a = deque([start_vertex] ) _a = [None] * self.size _a = 0 while queue: _a = queue.popleft() _a = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: _a = current_distance + edge.weight _a = distances[edge.destination_vertex] if ( isinstance(__a , __a ) and new_distance >= dest_vertex_distance ): continue _a = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex." ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
692
1
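Illustrative usage of the 0-1 BFS structure above (class and method names follow the restored snippet): zero-weight edges let a longer path win on cost.

# build a 5-vertex graph whose 0-weight edges form a shortcut
g = AdjacencyList(5)
g.add_edge(0, 1, 0)   # free edge
g.add_edge(1, 2, 1)
g.add_edge(0, 3, 1)
g.add_edge(3, 4, 1)
g.add_edge(2, 4, 0)   # free edge

# 0 -> 1 (0) -> 2 (1) -> 4 (0) costs 1, beating 0 -> 3 -> 4 which costs 2
assert g.get_shortest_path(0, 4) == 1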
def get_1s_count(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the
        # loop won't run 32 times, it only runs once per `1` bit
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
715
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
525
0
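A quick property check for Brian Kernighan's popcount above (assumes `get_1s_count` from the restored snippet is in scope):

# n & (n - 1) clears the lowest set bit, so the loop runs once per set bit;
# cross-check against Python's own binary representation for small inputs
for n in range(1_024):
    assert get_1s_count(n) == bin(n).count("1")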
'''simple docstring'''


def bfs(graph, source, sink, parent) -> bool:
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    # parent[] is filled by BFS and stores the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow

        # update residual capacities of the edges and reverse edges
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
489
'''simple docstring''' from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class UpperCAmelCase ( lowercase_ , lowercase_): """simple docstring""" lowerCAmelCase_ = """pixel_values""" lowerCAmelCase_ = False lowerCAmelCase_ = TimmBackboneConfig def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> List[str]: requires_backends(self , '''timm''' ) super().__init__(UpperCamelCase__ ) _UpperCamelCase =config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(F'''backbone {config.backbone} is not supported by timm.''' ) if hasattr(UpperCamelCase__ , '''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) _UpperCamelCase =getattr(UpperCamelCase__ , '''use_pretrained_backbone''' , UpperCamelCase__ ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. _UpperCamelCase =config.out_indices if getattr(UpperCamelCase__ , '''out_indices''' , UpperCamelCase__ ) is not None else (-1,) _UpperCamelCase =timm.create_model( config.backbone , pretrained=UpperCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCamelCase__ , **UpperCamelCase__ , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
_UpperCamelCase =self._backbone.return_layers _UpperCamelCase ={layer['''module''']: str(UpperCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(UpperCamelCase__ ) @classmethod def UpperCamelCase__ ( cls : List[Any] , UpperCamelCase__ : Tuple , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any ) -> Optional[int]: requires_backends(cls , ['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig _UpperCamelCase =kwargs.pop('''config''' , TimmBackboneConfig() ) _UpperCamelCase =kwargs.pop('''use_timm_backbone''' , UpperCamelCase__ ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) _UpperCamelCase =kwargs.pop('''num_channels''' , config.num_channels ) _UpperCamelCase =kwargs.pop('''features_only''' , config.features_only ) _UpperCamelCase =kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone ) _UpperCamelCase =kwargs.pop('''out_indices''' , config.out_indices ) _UpperCamelCase =TimmBackboneConfig( backbone=UpperCamelCase__ , num_channels=UpperCamelCase__ , features_only=UpperCamelCase__ , use_pretrained_backbone=UpperCamelCase__ , out_indices=UpperCamelCase__ , ) return super()._from_config(UpperCamelCase__ , **UpperCamelCase__ ) def UpperCamelCase__ ( self : str , UpperCamelCase__ : List[str] ) -> List[str]: pass def UpperCamelCase__ ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , **UpperCamelCase__ : str ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: _UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase =output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone _UpperCamelCase =self._all_layers _UpperCamelCase =self._backbone(UpperCamelCase__ , **UpperCamelCase__ ) _UpperCamelCase =self._return_layers _UpperCamelCase =tuple(hidden_states[i] for i in self.out_indices ) else: _UpperCamelCase =self._backbone(UpperCamelCase__ , **UpperCamelCase__ ) _UpperCamelCase =None _UpperCamelCase =tuple(UpperCamelCase__ ) _UpperCamelCase =tuple(UpperCamelCase__ ) if hidden_states is not None else None if not return_dict: _UpperCamelCase =(feature_maps,) if output_hidden_states: _UpperCamelCase =output + (hidden_states,) return output return BackboneOutput(feature_maps=UpperCamelCase__ , hidden_states=UpperCamelCase__ , attentions=UpperCamelCase__ )
404
0
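Running the Ford-Fulkerson routine above on its own example network (the classic CLRS six-vertex graph) should yield a maximum flow of 23; note the function consumes `graph` as a residual network, so pass it a fresh copy:

capacity = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(capacity, source=0, sink=5) == 23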
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ): a__ = 1 @register_to_config def __init__( self , lowercase=20_00 , lowercase=0.1 , lowercase=20 , lowercase=1e-3) -> List[Any]: '''simple docstring''' a__: int = None a__: Optional[int] = None a__: str = None def lowerCamelCase_ ( self , lowercase , lowercase = None) -> int: '''simple docstring''' a__: int = torch.linspace(1 , self.config.sampling_eps , lowercase , device=lowercase) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None) -> Tuple: '''simple docstring''' if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler') # TODO(Patrick) better comments + non-PyTorch # postprocess model score a__: Union[str, Any] = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) a__: Optional[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) a__: Union[str, Any] = std.flatten() while len(std.shape) < len(score.shape): a__: List[str] = std.unsqueeze(-1) a__: Union[str, Any] = -score / std # compute a__: str = -1.0 / len(self.timesteps) a__: Tuple = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) a__: str = beta_t.flatten() while len(beta_t.shape) < len(x.shape): a__: Optional[int] = beta_t.unsqueeze(-1) a__: Optional[int] = -0.5 * beta_t * x a__: Optional[int] = torch.sqrt(lowercase) a__: Optional[Any] = drift - diffusion**2 * score a__: int = x + drift * dt # add noise a__: Dict = randn_tensor(x.shape , layout=x.layout , generator=lowercase , device=x.device , dtype=x.dtype) a__: Union[str, Any] = x_mean + diffusion * math.sqrt(-dt) * noise return x, x_mean def __len__( self) -> List[Any]: '''simple docstring''' return self.config.num_train_timesteps
217
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ 'SEW_PRETRAINED_MODEL_ARCHIVE_LIST', 'SEWForCTC', 'SEWForSequenceClassification', 'SEWModel', 'SEWPreTrainedModel', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
217
1
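The VP-SDE scheduler row above is easier to follow as a single predictor step; below is a numpy sketch under assumed defaults (the beta range, step count, and the stand-in score model are illustrative, not from the dataset):

import math

import numpy as np

beta_min, beta_max, num_steps, sampling_eps = 0.1, 20.0, 1000, 1e-3
timesteps = np.linspace(1.0, sampling_eps, num_steps)
rng = np.random.default_rng(0)

x = rng.standard_normal(4)          # current sample
t = timesteps[0]
model_output = -x                   # stand-in for a learned score network

# rescale the model output by the marginal std of the VP-SDE at time t
log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
std = math.sqrt(1.0 - math.exp(2.0 * log_mean_coeff))
score = -model_output / std

# one Euler-Maruyama step of the reverse-time SDE (dt is negative)
dt = -1.0 / num_steps
beta_t = beta_min + t * (beta_max - beta_min)
drift = -0.5 * beta_t * x - beta_t * score   # drift - diffusion**2 * score
diffusion = math.sqrt(beta_t)
x_mean = x + drift * dt
x = x_mean + diffusion * math.sqrt(-dt) * rng.standard_normal(4)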
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
57
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=[0.5, 0.5, 0.5] , _snake_case=[0.5, 0.5, 0.5] , ): """simple docstring""" __lowerCamelCase = size if size is not None else {'''height''': 18, '''width''': 18} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = do_normalize __lowerCamelCase = image_mean __lowerCamelCase = image_std def _lowerCamelCase ( self ): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ = DPTImageProcessor if is_vision_available() else None def _lowerCamelCase ( self ): """simple docstring""" __lowerCamelCase = DPTImageProcessingTester(self ) @property def _lowerCamelCase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self ): """simple docstring""" __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , '''image_mean''' ) ) self.assertTrue(hasattr(_snake_case , '''image_std''' ) ) self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) ) self.assertTrue(hasattr(_snake_case , '''do_resize''' ) ) self.assertTrue(hasattr(_snake_case , '''size''' ) ) def _lowerCamelCase ( self ): """simple docstring""" __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _lowerCamelCase ( self ): """simple docstring""" __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], 
self.image_processor_tester.size['''width'''], ) , ) def _lowerCamelCase ( self ): """simple docstring""" __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowerCamelCase ( self ): """simple docstring""" __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
316
0
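The toy vocab/merges fixture in the XLM test row above describes a small BPE; a self-contained greedy-merge sketch (illustrative, not the transformers implementation) reproduces the expected tokenization:

def bpe_encode(word: str, merge_ranks: dict) -> list:
    # start from characters, with an end-of-word marker on the last one
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while True:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        ranked = [(merge_ranks[p], i) for i, p in enumerate(pairs) if p in merge_ranks]
        if not ranked:
            return symbols
        _, i = min(ranked)  # always apply the lowest-ranked (most frequent) merge
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]

# ranks mirror the test fixture's merge order: "l o", "lo w", "e r</w>"
ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}
assert bpe_encode("lower", ranks) == ["low", "er</w>"]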
"""simple docstring""" from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('''socket.socket''') @patch('''builtins.open''') def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_): a__ = Mock() a__ = conn, Mock() a__ = iter([1, None]) a__ = lambda lowerCamelCase_: next(_lowercase) # ===== invoke ===== send_file(filename='''mytext.txt''' , testing=_lowercase) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
709
"""simple docstring""" __a : Union[str, Any] = range(2, 20 + 1) __a : Any = [10**k for k in range(ks[-1] + 1)] __a : dict[int, dict[int, list[list[int]]]] = {} def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_): a__ = sum(a_i[j] for j in range(lowerCamelCase_ , len(lowerCamelCase_))) a__ = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase_) , lowerCamelCase_))) a__ ,a__ = 0, 0 a__ = n - i a__ = memo.get(lowerCamelCase_) if sub_memo is not None: a__ = sub_memo.get(lowerCamelCase_) if jumps is not None and len(lowerCamelCase_) > 0: # find and make the largest jump without going over a__ = -1 for _k in range(len(lowerCamelCase_) - 1 , -1 , -1): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: a__ = _k break if max_jump >= 0: a__ ,a__ ,a__ = jumps[max_jump] # since the difference between jumps is cached, add c a__ = diff + c for j in range(min(lowerCamelCase_ , len(lowerCamelCase_))): a__ ,a__ = divmod(lowerCamelCase_ , 10) if new_c > 0: add(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) else: a__ = [] else: a__ = {c: []} a__ = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps a__ ,a__ = next_term(lowerCamelCase_ , k - 1 , i + dn , lowerCamelCase_) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead a__ ,a__ = compute(lowerCamelCase_ , lowerCamelCase_ , i + dn , lowerCamelCase_) diff += _diff dn += terms_jumped a__ = sub_memo[c] # keep jumps sorted by # of terms skipped a__ = 0 while j < len(lowerCamelCase_): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(lowerCamelCase_ , (diff, dn, k)) return (diff, dn) def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_): if i >= n: return 0, i if k > len(lowerCamelCase_): a_i.extend([0 for _ in range(k - len(lowerCamelCase_))]) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) a__ = i a__ ,a__ ,a__ = 0, 0, 0 for j in range(len(lowerCamelCase_)): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 a__ = ds_c + ds_b diff += addend a__ = 0 for j in range(lowerCamelCase_): a__ = a_i[j] + addend a__ ,a__ = divmod(lowerCamelCase_ , 10) ds_c += a_i[j] if addend > 0: break if addend > 0: add(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) return diff, i - start_i def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_): for j in range(lowerCamelCase_ , len(lowerCamelCase_)): a__ = digits[j] + addend if s >= 10: a__ ,a__ = divmod(lowerCamelCase_ , 10) a__ = addend // 10 + quotient else: a__ = s a__ = addend // 10 if addend == 0: break while addend > 0: a__ ,a__ = divmod(lowerCamelCase_ , 10) digits.append(lowerCamelCase_) def SCREAMING_SNAKE_CASE ( lowerCamelCase_ = 10**15): a__ = [1] a__ = 1 a__ = 0 while True: a__ ,a__ = next_term(lowerCamelCase_ , 20 , i + dn , lowerCamelCase_) dn += terms_jumped if dn == n - i: break a__ = 0 for j in range(len(lowerCamelCase_)): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F'''{solution() = }''')
200
0
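The memoized solver in the row above accelerates the digit-sum sequence a(k+1) = a(k) + digitsum(a(k)); a brute-force reference (names illustrative) pins down the first terms it must agree with:

def digit_sum_sequence(n: int) -> int:
    # a(1) = 1, a(k + 1) = a(k) + digit_sum(a(k)); return a(n)
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# first few terms of the sequence: 1, 2, 4, 8, 16, 23, 28, 38, 49, ...
assert [digit_sum_sequence(k) for k in range(1, 10)] == [1, 2, 4, 8, 16, 23, 28, 38, 49]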
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} lowerCamelCase : Tuple = { "vocab_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", }, "emoji_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", }, } lowerCamelCase : Any = { "abeja/gpt-neox-japanese-2.7b": 2_048, } def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Any ): '''simple docstring''' with open(lowercase , 'r' , encoding='utf-8' ) as f: lowerCamelCase_ = json.loads(f.read() ) lowerCamelCase_ = collections.OrderedDict() lowerCamelCase_ = collections.OrderedDict() lowerCamelCase_ = collections.OrderedDict() with open(lowercase , 'r' , encoding='utf-8' ) as f: lowerCamelCase_ = f.readlines() lowerCamelCase_ = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(lowercase ): lowerCamelCase_ = b lowerCamelCase_ = idx for wd in b: lowerCamelCase_ = idx return vocab, raw_vocab, ids_to_tokens, emoji class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : str , A_ : Any , A_ : Any , A_ : Optional[Any]="<|endoftext|>" , A_ : Any="<|endoftext|>" , A_ : Optional[int]="<|startoftext|>" , A_ : Union[str, Any]="<|endoftext|>" , A_ : Any=False , **A_ : Tuple , ) -> Dict: """simple docstring""" super().__init__( unk_token=A_ , pad_token=A_ , bos_token=A_ , eos_token=A_ , do_clean_text=A_ , **A_ , ) if not os.path.isfile(A_ ): raise ValueError( f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(A_ ): raise ValueError( f"""Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google""" ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) lowerCamelCase_ = do_clean_text lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = load_vocab_and_emoji(A_ , A_ ) lowerCamelCase_ = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def a__ ( self : List[str] ) -> Dict: """simple docstring""" return len(self.raw_vocab ) def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" return dict(self.raw_vocab , **self.added_tokens_encoder ) def a__ ( self : Optional[Any] , A_ : str ) -> Tuple: """simple docstring""" return self.subword_tokenizer.tokenize(A_ , clean=self.do_clean_text ) def a__ ( self : Optional[int] , A_ : Dict ) -> List[Any]: """simple docstring""" return self.vocab.get(A_ , self.vocab.get(self.unk_token ) ) def a__ ( self : Union[str, Any] , A_ : Union[str, Any] ) -> int: """simple docstring""" return self.subword_tokenizer.convert_id_to_token(A_ ) def a__ ( self : Optional[int] , A_ : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = ''.join(A_ ).strip() return out_string def a__ ( self : Optional[Any] , A_ : "Conversation" ) -> List[int]: """simple docstring""" lowerCamelCase_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] ) if len(A_ ) > self.model_max_length: lowerCamelCase_ = input_ids[-self.model_max_length :] return input_ids def a__ ( self : List[Any] , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" lowerCamelCase_ = 0 if os.path.isdir(A_ ): lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: lowerCamelCase_ = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' 
) lowerCamelCase_ = token_index writer.write(','.join(A_ ) + '\n' ) index += 1 with open(A_ , 'w' , encoding='utf-8' ) as writer: json.dump(self.emoji , A_ ) return vocab_file, emoji_file class A( UpperCamelCase ): '''simple docstring''' def __init__( self : Any , A_ : Union[str, Any] , A_ : int , A_ : Tuple ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = vocab # same as swe lowerCamelCase_ = ids_to_tokens # same as bpe lowerCamelCase_ = emoji lowerCamelCase_ = np.max([len(A_ ) for w in self.vocab.keys()] ) lowerCamelCase_ = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) lowerCamelCase_ = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) lowerCamelCase_ = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) lowerCamelCase_ = re.compile( r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCamelCase_ = re.compile( r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) lowerCamelCase_ = re.compile( r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) lowerCamelCase_ = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' lowerCamelCase_ = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' lowerCamelCase_ = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__( self : str ) -> Optional[int]: """simple docstring""" return len(self.ids_to_tokens ) def a__ ( self : Union[str, Any] , A_ : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = self.content_repattera.sub('<URL>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<EMAIL>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<TEL>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<DATE>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<DATE>' , A_ ) lowerCamelCase_ = self.content_repattera.sub('<PRICE>' , A_ ) lowerCamelCase_ = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: lowerCamelCase_ = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' ) return content def a__ ( self : int , A_ : Optional[Any] , A_ : Tuple=False ) -> Dict: """simple docstring""" lowerCamelCase_ = text.replace(' ' , '<SP>' ) lowerCamelCase_ = text.replace(' ' , '<SP>' ) lowerCamelCase_ = text.replace('\r\n' , '<BR>' ) lowerCamelCase_ = text.replace('\n' , '<BR>' ) lowerCamelCase_ = text.replace('\r' , '<BR>' ) lowerCamelCase_ = text.replace('\t' , '<TAB>' ) lowerCamelCase_ = text.replace('—' , 'ー' ) lowerCamelCase_ = text.replace('−' , 'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: lowerCamelCase_ = text.replace(A_ , A_ ) if clean: lowerCamelCase_ = self.clean_text(A_ ) def check_simbol(A_ : Union[str, Any] ): lowerCamelCase_ = x.encode() if len(A_ ) == 1 and len(A_ ) == 2: lowerCamelCase_ = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0XC2_A1 and c <= 0XC2_BF) or (c >= 0XC7_80 and c <= 0XC7_83) or (c >= 0XCA_B9 and c <= 0XCB_BF) or (c >= 0XCC_80 and c <= 0XCD_A2) ): return True return False def checkuae(A_ : Tuple ): lowerCamelCase_ = x.encode() if len(A_ ) == 1 and len(A_ ) == 3: lowerCamelCase_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if 
c >= 0XE2_80_80 and c <= 0XE2_B0_7F: return True return False lowerCamelCase_ = 0 lowerCamelCase_ = [] while pos < len(A_ ): lowerCamelCase_ = min(len(A_ ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 lowerCamelCase_ = [] # (token_id, token, pos) for e in range(A_ , A_ , -1 ): lowerCamelCase_ = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(A_ ) > 2: lowerCamelCase_ = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(A_ ) > 0: # the smallest token_id is adopted lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = sorted(A_ , key=lambda A_ : x[0] )[0] result.append(A_ ) lowerCamelCase_ = e else: lowerCamelCase_ = pos + 1 lowerCamelCase_ = text[pos:end] if check_simbol(A_ ): result.append('<KIGOU>' ) elif checkuae(A_ ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) lowerCamelCase_ = end return result def a__ ( self : List[Any] , A_ : Tuple , A_ : List[str]="\n" ) -> List[str]: """simple docstring""" lowerCamelCase_ = [] lowerCamelCase_ = [] lowerCamelCase_ = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(A_ ) > 0: words.append(bytearray(A_ ).decode('utf-8' , errors='replace' ) ) lowerCamelCase_ = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(A_ ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(A_ ) if len(A_ ) > 0: words.append(bytearray(A_ ).decode('utf-8' , errors='replace' ) ) lowerCamelCase_ = ''.join(A_ ) return text
70
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ): '''simple docstring''' __A =size if size is not None else {'''height''': 1_8, '''width''': 1_8} __A =parent __A =batch_size __A =num_channels __A =image_size __A =min_resolution __A =max_resolution __A =do_resize __A =size __A =apply_ocr def __UpperCamelCase ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __UpperCamelCase ( self ): '''simple docstring''' __A =LayoutLMvaImageProcessingTester(self ) @property def __UpperCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowercase__ , '''size''' ) ) self.assertTrue(hasattr(lowercase__ , '''apply_ocr''' ) ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} ) __A =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) def __UpperCamelCase ( self ): '''simple docstring''' pass def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , Image.Image ) # Test not batched input __A =image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , lowercase__ ) self.assertIsInstance(encoding.boxes , lowercase__ ) # Test batched __A =image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , np.ndarray ) # Test not 
batched input __A =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __A =image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ , torch.Tensor ) # Test not batched input __A =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __A =image_processing(lowercase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __UpperCamelCase ( self ): '''simple docstring''' __A =LayoutLMvaImageProcessor() from datasets import load_dataset __A =load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) __A =Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A =image_processing(lowercase__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', 
'''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A =[[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], 
[7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , lowercase__ ) self.assertListEqual(encoding.boxes , lowercase__ ) # with apply_OCR = False __A =LayoutLMvaImageProcessor(apply_ocr=lowercase__ ) __A =image_processing(lowercase__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
184
0
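The Japanese subword tokenizer in the row above resolves each position by greedy longest-match against the vocabulary (with a min-token-id tie-break in the original); below is a self-contained sketch of the core scan, using an illustrative toy vocab:

def longest_match_tokenize(text: str, vocab: dict, max_len: int) -> list:
    pos, tokens = 0, []
    while pos < len(text):
        # try the longest candidate first, shrinking the window until a hit
        for end in range(min(len(text), pos + max_len), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                tokens.append(piece)
                pos = end
                break
        else:
            tokens.append("<unk>")  # no match: emit an unknown marker
            pos += 1
    return tokens

vocab = {"to": 0, "ken": 1, "token": 2, "ize": 3}
assert longest_match_tokenize("tokenize", vocab, max_len=5) == ["token", "ize"]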
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
710
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
332
0
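The row above ships a Rabin-Karp matcher; its correctness hinges on the rolling-hash invariant, which a few lines can verify numerically (constants mirror the snippet, and the text is one of its test strings):

alphabet_size, modulus = 256, 1000003

def window_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h

text, m = "alskfjaldsabc1abc1abc12k23adsfabcabc", 4
power = pow(alphabet_size, m - 1, modulus)  # weight of the outgoing character

# sliding the window by one must equal hashing the new window from scratch
h = window_hash(text[:m])
for i in range(len(text) - m):
    h = ((h - ord(text[i]) * power) * alphabet_size + ord(text[i + m])) % modulus
    assert h == window_hash(text[i + 1 : i + 1 + m])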
import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class lowerCAmelCase_ : def __init__( self ,snake_case__=2 ,snake_case__=3 ,snake_case__=64 ,snake_case__=None ): SCREAMING_SNAKE_CASE_ : List[Any] = np.random.default_rng(snake_case__ ) SCREAMING_SNAKE_CASE_ : int = length SCREAMING_SNAKE_CASE_ : Optional[int] = rng.normal(size=(length,) ).astype(np.floataa ) SCREAMING_SNAKE_CASE_ : List[Any] = a * self.x + b + rng.normal(scale=0.1 ,size=(length,) ).astype(np.floataa ) def __len__( self ): return self.length def __getitem__( self ,snake_case__ ): return {"x": self.x[i], "y": self.y[i]} class lowerCAmelCase_ ( torch.nn.Module ): def __init__( self ,snake_case__=0 ,snake_case__=0 ,snake_case__=False ): super().__init__() SCREAMING_SNAKE_CASE_ : Dict = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = True def snake_case ( self ,snake_case__=None ): if self.first_batch: print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = False return x * self.a[0] + self.b[0] class lowerCAmelCase_ ( torch.nn.Module ): def __init__( self ,snake_case__=0 ,snake_case__=0 ,snake_case__=False ): super().__init__() SCREAMING_SNAKE_CASE_ : List[str] = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) SCREAMING_SNAKE_CASE_ : Tuple = True def snake_case ( self ,snake_case__=None ): if self.first_batch: print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) SCREAMING_SNAKE_CASE_ : Tuple = False return x * self.a + self.b def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int = 16 ) -> List[str]: """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = AutoTokenizer.from_pretrained('bert-base-cased' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'} SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('csv' , data_files=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Dict = datasets['train'].unique('label' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = {v: i for i, v in enumerate(lowerCamelCase_ )} def tokenize_function(lowerCamelCase_ : List[str] ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE_ : Dict = tokenizer( examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' ) if "label" in examples: SCREAMING_SNAKE_CASE_ : str = [label_to_id[l] for l in examples['label']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=['sentence1', 'sentence2', 'label'] , ) def collate_fn(lowerCamelCase_ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase_ , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(lowerCamelCase_ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE_ : Any = DataLoader(tokenized_datasets['train'] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=2 ) SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(tokenized_datasets['validation'] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=1 ) return train_dataloader, eval_dataloader
105
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    '''simple docstring'''
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
537
0
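Illustrative usage of `neville_interpolate` above (the name follows the restored snippet): on polynomial data the tableau reproduces the exact value, here f(x) = x**2 evaluated at x = 5:

x_points = [1, 2, 3, 4]
y_points = [x * x for x in x_points]  # samples of f(x) = x**2

value, table = neville_interpolate(x_points, y_points, 5)
assert value == 25  # degree-2 data is reproduced exactly at x = 5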
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger(__name__) a = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] a = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def _SCREAMING_SNAKE_CASE ( snake_case ) -> Any: _UpperCAmelCase = torch.load(snake_case , map_location="""cpu""" ) return sd def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case=rename_keys_prefix ) -> Any: _UpperCAmelCase = OrderedDict() _UpperCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _UpperCAmelCase = key for name_pair in rename_keys_prefix: _UpperCAmelCase = new_key.replace(name_pair[0] , name_pair[1] ) _UpperCAmelCase = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _UpperCAmelCase = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Dict: assert ( checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}." # Get Config if "pre" in checkpoint_path: _UpperCAmelCase = """pretraining""" if "vcr" in checkpoint_path: _UpperCAmelCase = {"""visual_embedding_dim""": 5_1_2} elif "vqa_advanced" in checkpoint_path: _UpperCAmelCase = {"""visual_embedding_dim""": 2_0_4_8} elif "vqa" in checkpoint_path: _UpperCAmelCase = {"""visual_embedding_dim""": 2_0_4_8} elif "nlvr" in checkpoint_path: _UpperCAmelCase = {"""visual_embedding_dim""": 1_0_2_4} else: raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." 
) else: if "vcr" in checkpoint_path: _UpperCAmelCase = {"""visual_embedding_dim""": 5_1_2} _UpperCAmelCase = """multichoice""" elif "vqa_advanced" in checkpoint_path: _UpperCAmelCase = {"""visual_embedding_dim""": 2_0_4_8} _UpperCAmelCase = """vqa_advanced""" elif "vqa" in checkpoint_path: _UpperCAmelCase = {"""visual_embedding_dim""": 2_0_4_8, """num_labels""": 3_1_2_9} _UpperCAmelCase = """vqa""" elif "nlvr" in checkpoint_path: _UpperCAmelCase = { """visual_embedding_dim""": 1_0_2_4, """num_labels""": 2, } _UpperCAmelCase = """nlvr""" _UpperCAmelCase = VisualBertConfig(**snake_case ) # Load State Dict _UpperCAmelCase = load_state_dict(snake_case ) _UpperCAmelCase = get_new_dict(snake_case , snake_case ) if model_type == "pretraining": _UpperCAmelCase = VisualBertForPreTraining(snake_case ) elif model_type == "vqa": _UpperCAmelCase = VisualBertForQuestionAnswering(snake_case ) elif model_type == "nlvr": _UpperCAmelCase = VisualBertForVisualReasoning(snake_case ) elif model_type == "multichoice": _UpperCAmelCase = VisualBertForMultipleChoice(snake_case ) model.load_state_dict(snake_case ) # Save Checkpoints Path(snake_case ).mkdir(exist_ok=snake_case ) model.save_pretrained(snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") a = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
175
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length matches pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
175
1
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
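# --- Hedged usage sketch: maxpooling/avgpooling (defined above) on a small
# square matrix. With size=2 and stride=2, a 4x4 input yields a 2x2 output.
import numpy as np

demo = np.array([[1, 2, 3, 4],
                 [5, 6, 7, 8],
                 [9, 10, 11, 12],
                 [13, 14, 15, 16]])
print(maxpooling(demo, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(demo, size=2, stride=2))  # [[ 3.  5.] [11. 13.]] (int-truncated)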
312
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class lowercase_ ( UpperCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = SMALL_MODEL_IDENTIFIER lowerCAmelCase = '''pt''' lowerCAmelCase = '''tf''' def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]: lowerCAmelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any: lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=__SCREAMING_SNAKE_CASE ) model_tf.save_pretrained(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: lowerCAmelCase = '''mock_framework''' # Framework provided - return whatever the user provides lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(__SCREAMING_SNAKE_CASE ): lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) ->Any: lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE ) with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt ) # PyTorch not in environment -> use TensorFlow lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE ) with patch('''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf ) # Both in environment -> use PyTorch lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE ) with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ), patch( 
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ): lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt ) # Both not in environment -> raise error lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE ) with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ), patch( '''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ): with self.assertRaises(__SCREAMING_SNAKE_CASE ): lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
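# --- Hedged distillation of the resolution order exercised by the tests above:
# with no explicit framework and no local checkpoint to pin one, PyTorch wins
# over TensorFlow, and an error is raised when neither is installed.
def resolve_framework(pt_available: bool, tf_available: bool) -> str:
    if pt_available:
        return "pt"
    if tf_available:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is available.")

assert resolve_framework(True, True) == "pt"
assert resolve_framework(False, True) == "tf"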
312
1
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCamelCase_ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class snake_case_ ( datasets.BuilderConfig ): '''simple docstring''' __UpperCamelCase = None def _UpperCAmelCase ( A , A , ): '''simple docstring''' import pyspark def generate_fn(): UpperCAmelCase__ =df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) ) for partition_id in partition_order: UpperCAmelCase__ =df_with_partition_id.select("*" ).where(F"""part_id = {partition_id}""" ).drop("part_id" ) UpperCAmelCase__ =partition_df.collect() UpperCAmelCase__ =0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class snake_case_ ( _BaseExamplesIterable ): '''simple docstring''' def __init__( self, A_, A_=None, ) -> Dict: UpperCAmelCase__ =df UpperCAmelCase__ =partition_order or range(self.df.rdd.getNumPartitions() ) UpperCAmelCase__ =_generate_iterable_examples(self.df, self.partition_order ) def __iter__( self ) -> List[Any]: yield from self.generate_examples_fn() def __UpperCAmelCase ( self, A_ ) -> Union[str, Any]: UpperCAmelCase__ =list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(__lowerCAmelCase ) return SparkExamplesIterable(self.df, partition_order=__lowerCAmelCase ) def __UpperCAmelCase ( self, A_, A_ ) -> int: UpperCAmelCase__ =self.split_shard_indices_by_worker(__lowerCAmelCase, __lowerCAmelCase ) return SparkExamplesIterable(self.df, partition_order=__lowerCAmelCase ) @property def __UpperCAmelCase ( self ) -> List[Any]: return len(self.partition_order ) class snake_case_ ( datasets.DatasetBuilder ): '''simple docstring''' __UpperCamelCase = SparkConfig def __init__( self, A_, A_ = None, A_ = None, **A_, ) -> Union[str, Any]: import pyspark UpperCAmelCase__ =pyspark.sql.SparkSession.builder.getOrCreate() UpperCAmelCase__ =df UpperCAmelCase__ =working_dir super().__init__( cache_dir=__lowerCAmelCase, config_name=str(self.df.semanticHash() ), **__lowerCAmelCase, ) def __UpperCAmelCase ( self ) -> List[Any]: def create_cache_and_write_probe(A_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir, exist_ok=__lowerCAmelCase ) UpperCAmelCase__ =os.path.join(self._cache_dir, "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(__lowerCAmelCase, "a" ) return [probe_file] if self._spark.conf.get("spark.master", "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: UpperCAmelCase__ =( self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(__lowerCAmelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def __UpperCAmelCase ( self ) -> Tuple: return datasets.DatasetInfo(features=self.config.features ) def __UpperCAmelCase ( self, A_ ) -> Dict: return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __UpperCAmelCase ( self, A_ ) -> Union[str, Any]: import pyspark def get_arrow_batch_size(A_ ): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} ) UpperCAmelCase__ =self.df.count() UpperCAmelCase__ =df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. UpperCAmelCase__ =( self.df.limit(__lowerCAmelCase ) .repartition(1 ) .mapInArrow(__lowerCAmelCase, "batch_bytes: long" ) .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) ) .collect()[0] .sample_bytes / sample_num_rows ) UpperCAmelCase__ =approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. UpperCAmelCase__ =min(__lowerCAmelCase, int(approx_total_size / max_shard_size ) ) UpperCAmelCase__ =self.df.repartition(__lowerCAmelCase ) def __UpperCAmelCase ( self, A_, A_, A_, ) -> Union[str, Any]: import pyspark UpperCAmelCase__ =ParquetWriter if file_format == "parquet" else ArrowWriter UpperCAmelCase__ =os.path.join(self._working_dir, os.path.basename(__lowerCAmelCase ) ) if self._working_dir else fpath UpperCAmelCase__ =file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. UpperCAmelCase__ =self.config.features UpperCAmelCase__ =self._writer_batch_size UpperCAmelCase__ =self._fs.storage_options def write_arrow(A_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. UpperCAmelCase__ =pyspark.TaskContext().taskAttemptId() UpperCAmelCase__ =next(__lowerCAmelCase, __lowerCAmelCase ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], ) UpperCAmelCase__ =0 UpperCAmelCase__ =writer_class( features=__lowerCAmelCase, path=working_fpath.replace("SSSSS", f"""{shard_id:05d}""" ).replace("TTTTT", f"""{task_id:05d}""" ), writer_batch_size=__lowerCAmelCase, storage_options=__lowerCAmelCase, embed_local_files=__lowerCAmelCase, ) UpperCAmelCase__ =pa.Table.from_batches([first_batch] ) writer.write_table(__lowerCAmelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: UpperCAmelCase__ , UpperCAmelCase__ =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) shard_id += 1 UpperCAmelCase__ =writer_class( features=writer._features, path=working_fpath.replace("SSSSS", f"""{shard_id:05d}""" ).replace("TTTTT", f"""{task_id:05d}""" ), writer_batch_size=__lowerCAmelCase, storage_options=__lowerCAmelCase, embed_local_files=__lowerCAmelCase, ) UpperCAmelCase__ =pa.Table.from_batches([batch] ) writer.write_table(__lowerCAmelCase ) if writer._num_bytes > 0: UpperCAmelCase__ , UpperCAmelCase__ =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(__lowerCAmelCase ) ): UpperCAmelCase__ =os.path.join(os.path.dirname(__lowerCAmelCase ), os.path.basename(__lowerCAmelCase ) ) shutil.move(__lowerCAmelCase, __lowerCAmelCase ) UpperCAmelCase__ =( self.df.mapInArrow(__lowerCAmelCase, "task_id: long, num_examples: long, num_bytes: long" ) .groupBy("task_id" ) .agg( pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ), pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ), pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ), pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ), ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __UpperCAmelCase ( self, A_, A_ = "arrow", A_ = None, A_ = None, **A_, ) -> Any: self._validate_cache_dir() UpperCAmelCase__ =convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(__lowerCAmelCase ) UpperCAmelCase__ =not is_remote_filesystem(self._fs ) UpperCAmelCase__ =os.path.join if is_local else posixpath.join UpperCAmelCase__ ="-TTTTT-SSSSS-of-NNNNN" UpperCAmelCase__ =f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" UpperCAmelCase__ =path_join(self._output_dir, __lowerCAmelCase ) UpperCAmelCase__ =0 UpperCAmelCase__ =0 UpperCAmelCase__ =0 UpperCAmelCase__ =[] UpperCAmelCase__ =[] for task_id, content in self._prepare_split_single(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase ): ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) =content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(__lowerCAmelCase ) UpperCAmelCase__ =total_num_examples UpperCAmelCase__ =total_num_bytes # should rename everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: UpperCAmelCase__ =all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in 
a # pickling error due to pickling the SparkContext. UpperCAmelCase__ =self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( A_, A_, A_, ): rename( __lowerCAmelCase, fpath.replace("SSSSS", f"""{shard_id:05d}""" ).replace("TTTTT", f"""{task_id:05d}""" ), fpath.replace("TTTTT-SSSSS", f"""{global_shard_id:05d}""" ).replace("NNNNN", f"""{total_shards:05d}""" ), ) UpperCAmelCase__ =[] UpperCAmelCase__ =0 for i in range(len(__lowerCAmelCase ) ): UpperCAmelCase__ , UpperCAmelCase__ =task_id_and_num_shards[i] for shard_id in range(__lowerCAmelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(__lowerCAmelCase, len(__lowerCAmelCase ) ).map(lambda A_ : _rename_shard(*__lowerCAmelCase ) ).collect() else: # don't use any pattern UpperCAmelCase__ =0 UpperCAmelCase__ =task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS", f"""{shard_id:05d}""" ).replace("TTTTT", f"""{task_id:05d}""" ), fpath.replace(__lowerCAmelCase, "" ), ) def __UpperCAmelCase ( self, A_, ) -> Optional[int]: return SparkExamplesIterable(self.df )
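# --- Hedged sketch of the shard-naming convention used above. File names embed
# TTTTT (task id) and SSSSS (shard id) placeholders that are later rewritten to
# a global -SSSSS-of-NNNNN pattern; the values below are illustrative only.
fpath = "my_dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"
task_id, shard_id, global_shard_id, total_shards = 3, 0, 7, 12
per_task = fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")
final = fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")
assert per_task == "my_dataset-train-00003-00000-of-NNNNN.arrow"
assert final == "my_dataset-train-00007-of-00012.arrow"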
707
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def _UpperCAmelCase ( A ): '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) UpperCamelCase_ = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class snake_case_ ( a ): '''simple docstring''' @staticmethod def __UpperCAmelCase ( A_ ) -> Union[str, Any]: UpperCAmelCase__ =parser.add_parser( "convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.", ) train_parser.add_argument("--model_type", type=A_, required=A_, help="Model's type." ) train_parser.add_argument( "--tf_checkpoint", type=A_, required=A_, help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output", type=A_, required=A_, help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config", type=A_, default="", help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name", type=A_, default=A_, help="Optional fine-tuning task name if the TF model was a finetuned model.", ) train_parser.set_defaults(func=A_ ) def __init__( self, A_, A_, A_, A_, A_, *A_, ) -> List[str]: UpperCAmelCase__ =logging.get_logger("transformers-cli/converting" ) self._logger.info(f"""Loading model {model_type}""" ) UpperCAmelCase__ =model_type UpperCAmelCase__ =tf_checkpoint UpperCAmelCase__ =pytorch_dump_output UpperCAmelCase__ =config UpperCAmelCase__ =finetuning_task_name def __UpperCAmelCase ( self ) -> Tuple: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(A_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A_ ) if "ckpt" in self._tf_checkpoint.lower(): UpperCAmelCase__ =self._tf_checkpoint UpperCAmelCase__ ="" else: 
UpperCAmelCase__ =self._tf_checkpoint UpperCAmelCase__ ="" convert_transfo_xl_checkpoint_to_pytorch( A_, self._config, self._pytorch_dump_output, A_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
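# --- Hedged sketch of the guarded-import pattern repeated in every branch above:
# each converter is imported lazily inside its branch, so a missing TensorFlow
# install only fails the one conversion that needs it, and the user sees an
# actionable message instead of a bare ImportError. The module below is hypothetical.
IMPORT_ERROR_MESSAGE = "This conversion requires TensorFlow; see https://www.tensorflow.org/install/."

def load_albert_converter():
    try:
        from my_pkg.convert_albert import convert_tf_checkpoint_to_pytorch  # hypothetical module
    except ImportError:
        raise ImportError(IMPORT_ERROR_MESSAGE)
    return convert_tf_checkpoint_to_pytorch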
510
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) A__ : Optional[int] =logging.getLogger(__name__) @dataclass class UpperCAmelCase : _lowercase: str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _lowercase: Optional[str] = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowercase: Optional[str] = field( default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _lowercase: Optional[str] = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) _lowercase: bool = field(default=snake_case_ , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) _lowercase: bool = field(default=snake_case_ , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class UpperCAmelCase : _lowercase: str = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) _lowercase: Optional[str] = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) _lowercase: Optional[int] = field( default=1024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) _lowercase: Optional[int] = field( default=128 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) _lowercase: Optional[int] = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) _lowercase: Optional[int] = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) _lowercase: Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) _lowercase: Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} ) _lowercase: Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. 
-1 means use all.'''} ) _lowercase: Optional[str] = field(default=snake_case_ , metadata={'''help''': '''Source language id for translation.'''} ) _lowercase: Optional[str] = field(default=snake_case_ , metadata={'''help''': '''Target language id for translation.'''} ) _lowercase: Optional[int] = field(default=snake_case_ , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) _lowercase: bool = field( default=snake_case_ , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" logger.info(f"***** {split} metrics *****" ) for key in sorted(metrics.keys() ): logger.info(f" {key} = {metrics[key]}" ) save_json(lowerCAmelCase , os.path.join(lowerCAmelCase , f"{split}_results.json" ) ) def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses() check_output_dir(lowerCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowerCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): assert hasattr(lowerCAmelCase , lowerCAmelCase ), f"({config.__class__.__name__}) doesn't have a `{p}` attribute" setattr(lowerCAmelCase , lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) ) _lowerCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _lowerCAmelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowerCAmelCase , lowerCAmelCase ): _lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _lowerCAmelCase = SeqaSeqDataset # Get datasets _lowerCAmelCase = ( dataset_class( lowerCAmelCase , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) _lowerCAmelCase = ( dataset_class( lowerCAmelCase , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _lowerCAmelCase = ( dataset_class( lowerCAmelCase , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer _lowerCAmelCase = ( build_compute_metrics_fn(data_args.task , lowerCAmelCase ) if training_args.predict_with_generate else None ) _lowerCAmelCase = SeqaSeqTrainer( model=lowerCAmelCase , args=lowerCAmelCase , data_args=lowerCAmelCase , train_dataset=lowerCAmelCase , eval_dataset=lowerCAmelCase , data_collator=SeqaSeqDataCollator( lowerCAmelCase , lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , ) _lowerCAmelCase = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) _lowerCAmelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else 
None ) _lowerCAmelCase = train_result.metrics _lowerCAmelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , lowerCAmelCase , training_args.output_dir ) all_metrics.update(lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" ) _lowerCAmelCase = data_args.n_val _lowerCAmelCase = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , lowerCAmelCase , training_args.output_dir ) all_metrics.update(lowerCAmelCase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _lowerCAmelCase = trainer.predict(test_dataset=lowerCAmelCase , metric_key_prefix="""test""" ) _lowerCAmelCase = test_output.metrics _lowerCAmelCase = data_args.n_test if trainer.is_world_process_zero(): _lowerCAmelCase = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , lowerCAmelCase , training_args.output_dir ) all_metrics.update(lowerCAmelCase ) if training_args.predict_with_generate: _lowerCAmelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase ) _lowerCAmelCase = lmap(str.strip , lowerCAmelCase ) write_txt_file(lowerCAmelCase , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(lowerCAmelCase , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" main() if __name__ == "__main__": main()
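# --- Hedged sketch of the metrics bookkeeping above: every split's metrics are
# logged sorted by key and written to <output_dir>/<split>_results.json, then all
# splits are merged into one all_results.json. File names mirror the script.
import json
import os
import tempfile

def save_split_metrics(split: str, metrics: dict, output_dir: str) -> None:
    for key in sorted(metrics):
        print(f"  {key} = {metrics[key]}")
    with open(os.path.join(output_dir, f"{split}_results.json"), "w") as f:
        json.dump(metrics, f, indent=2)

with tempfile.TemporaryDirectory() as tmp:
    save_split_metrics("val", {"val_loss": 1.2345, "val_n_objs": 100}, tmp)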
207
"""simple docstring"""
from ..utils import DummyObject, requires_backends


# The original class identifier was lost in obfuscation; the name below is a
# placeholder over the standard dummy-object template for classes that require
# both torch and scipy.
class TorchAndScipyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
207
1
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # plain equality for everything else (the original `result == expected`
            # was a silent no-op without the assert)
            assert result == expected
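# --- Hedged sketch: the 1%-tolerance helper above in isolation. Byte counts can
# drift slightly between environments, so the test uses a relative check instead
# of exact equality.
assert is_apercent_close(2351000, 2351563)       # ~0.02% off -> close enough
assert not is_apercent_close(2400000, 2351563)   # ~2% off -> too far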
709
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput _lowercase : int =logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCAmelCase_ ( A_ ): '''simple docstring''' def __init__( self , *lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ): '''simple docstring''' super().__init__(*lowerCamelCase , **lowerCamelCase ) a__ = eval_examples a__ = post_process_function a__ = quant_trainer_args a__ = 128 # default number of calibration samples def _A ( self , lowerCamelCase=None ): '''simple docstring''' if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires an calib_dataset.""" ) a__ = calib_dataset if calib_dataset is not None else self.calib_dataset a__ = self._remove_unused_columns(lowerCamelCase , description="""Calibration""" ) return DataLoader( lowerCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCamelCase , ) def _A ( self , lowerCamelCase=None ): '''simple docstring''' a__ = self.train_dataset if calib_dataset is None else calib_dataset a__ = self.get_calib_dataloader(lowerCamelCase ) a__ = self.model quant_trainer.configure_model(lowerCamelCase , self.quant_trainer_args , calib=lowerCamelCase ) model.eval() quant_trainer.enable_calibration(lowerCamelCase ) logger.info("""***** Running calibration *****""" ) logger.info(f' Num examples = {self.calib_num}' ) logger.info(f' Batch size = {calib_dataloader.batch_size}' ) for step, inputs in enumerate(lowerCamelCase ): # Prediction step a__ , a__ , a__ = self.prediction_step(lowerCamelCase , lowerCamelCase , prediction_loss_only=lowerCamelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(lowerCamelCase , self.quant_trainer_args ) a__ = model def _A ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase = "eval" ): '''simple docstring''' a__ = self.eval_dataset if eval_dataset is None else eval_dataset a__ = self.get_eval_dataloader(lowerCamelCase ) a__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. a__ = self.compute_metrics a__ = None a__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: a__ = eval_loop( lowerCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase , ) finally: a__ = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: a__ = self.post_process_function(lowerCamelCase , lowerCamelCase , output.predictions ) a__ = self.compute_metrics(lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): a__ = metrics.pop(lowerCamelCase ) self.log(lowerCamelCase ) else: a__ = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) a__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase ) return metrics def _A ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase = "test" ): '''simple docstring''' a__ = self.get_test_dataloader(lowerCamelCase ) # Temporarily disable metric computation, we will do it in the loop here. a__ = self.compute_metrics a__ = None a__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: a__ = eval_loop( lowerCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase , ) finally: a__ = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output a__ = self.post_process_function(lowerCamelCase , lowerCamelCase , output.predictions , """predict""" ) a__ = self.compute_metrics(lowerCamelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): a__ = metrics.pop(lowerCamelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase ) def _A ( self , lowerCamelCase="./" ): '''simple docstring''' a__ = self.eval_dataset a__ = self.get_eval_dataloader(lowerCamelCase ) a__ = next(iter(lowerCamelCase ) ) # saving device - to make it consistent a__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple a__ = tuple(v.to(lowerCamelCase ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer a__ = True a__ = self.model.to(lowerCamelCase ) model.eval() model.float() a__ = model.module if hasattr(lowerCamelCase , """module""" ) else model quant_trainer.configure_model(lowerCamelCase , self.quant_trainer_args ) a__ = os.path.join(lowerCamelCase , """model.onnx""" ) logger.info(f'exporting model to {output_model_file}' ) a__ = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( lowerCamelCase , lowerCamelCase , lowerCamelCase , export_params=lowerCamelCase , opset_version=13 , do_constant_folding=lowerCamelCase , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, """token_type_ids""": axes, """output_start_logits""": axes, """output_end_logits""": axes, } , verbose=lowerCamelCase , ) logger.info("""onnx export finished""" )
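# --- Hedged sketch of the calibration budget above: batches are consumed until
# (step + 1) * batch_size >= calib_num, i.e. a ceiling-division number of batches.
def batches_needed(calib_num: int, batch_size: int) -> int:
    return -(-calib_num // batch_size)  # ceiling division

assert batches_needed(128, 32) == 4
assert batches_needed(130, 32) == 5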
412
0
"""simple docstring""" def A ( _A = 10, _A = 22 ): """simple docstring""" snake_case_ :List[Any] = range(1, _A ) snake_case_ :Union[str, Any] = range(1, _A ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F'''{solution(10, 22) = }''')
584
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip snake_case__ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase__ ( a : Any ) -> Optional[int]: """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase__ ( a : Dict , a : str , a : Optional[Any] ) -> Tuple: """simple docstring""" return max(metric_fn(a , a ) for gt in ground_truths ) def lowerCamelCase__ ( a : int , a : str , a : Optional[Any] ) -> Tuple: """simple docstring""" a__ :List[str] = [line.strip() for line in open(a , "r" ).readlines()] a__ :Tuple = [] if args.gold_data_mode == "qa": a__ :Optional[Any] = pd.read_csv(a , sep="\t" , header=a ) for answer_list in data[1]: a__ :Union[str, Any] = ast.literal_eval(a ) answers.append(a ) else: a__ :int = [line.strip() for line in open(a , "r" ).readlines()] a__ :int = [[reference] for reference in references] a__ :Tuple = 0 for prediction, ground_truths in zip(a , a ): total += 1 em += metric_max_over_ground_truths(a , a , a ) fa += metric_max_over_ground_truths(a , a , a ) a__ :Optional[int] = 1_0_0.0 * em / total a__ :str = 1_0_0.0 * fa / total logger.info(F'''F1: {fa:.2f}''' ) logger.info(F'''EM: {em:.2f}''' ) def lowerCamelCase__ ( a : Optional[int] , a : Tuple , a : int ) -> Dict: """simple docstring""" a__ :List[Any] = args.k a__ :str = [line.strip() for line in open(a , "r" ).readlines()] a__ :Any = [line.strip() for line in open(a , "r" ).readlines()] a__ :Optional[int] = 0 for hypo, reference in zip(a , a ): a__ :Optional[Any] = set(hypo.split("\t" )[:k] ) a__ :int = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ :List[Any] = 1_0_0.0 * em / total logger.info(F'''Precision@{k}: {em: .2f}''' ) def lowerCamelCase__ ( a : Optional[int] , a : Tuple , a : Optional[Any] ) -> Dict: """simple docstring""" def strip_title(a : Any ): if title.startswith("\"" ): a__ :Optional[Any] = title[1:] if title.endswith("\"" ): a__ :Any = title[:-1] return title a__ :int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( a , return_tensors="pt" , padding=a , truncation=a , )["input_ids"].to(args.device ) a__ :Dict = rag_model.rag.question_encoder(a ) a__ :Optional[int] = question_enc_outputs[0] a__ :Optional[Any] = rag_model.retriever( a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ :int = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ :Any = [] for docs in all_docs: a__ :Tuple = [strip_title(a ) for title in docs["title"]] provenance_strings.append("\t".join(a ) ) return provenance_strings def lowerCamelCase__ ( a : Union[str, Any] , a : Union[str, Any] , a : str ) -> str: """simple docstring""" with torch.no_grad(): a__ :str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( a , return_tensors="pt" , padding=a , truncation=a ) a__ :Tuple = 
inputs_dict.input_ids.to(args.device ) a__ :int = inputs_dict.attention_mask.to(args.device ) a__ :int = rag_model.generate( # rag_model overwrites generate a , attention_mask=a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ :Optional[int] = rag_model.retriever.generator_tokenizer.batch_decode(a , skip_special_tokens=a ) if args.print_predictions: for q, a in zip(a , a ): logger.info("Q: {} - A: {}".format(a , a ) ) return answers def lowerCamelCase__ ( ) -> List[Any]: """simple docstring""" a__ :List[Any] = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=a , choices=["exact", "compressed", "legacy"] , type=a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=a , type=a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=a , type=a , required=a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=a , type=a , required=a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=a , type=a , required=a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ :Tuple = parser.parse_args() a__ :str = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def lowerCamelCase__ ( a : int ) -> List[Any]: """simple docstring""" a__ :Optional[int] = {} if args.model_type is None: a__ :Dict = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ :Union[str, Any] = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ :Union[str, Any] = args.n_docs if args.index_name is not None: a__ :Tuple = args.index_name if args.index_path is not None: a__ :Any = args.index_path else: a__ :Optional[Any] = BartForConditionalGeneration a__ :Optional[Any] = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , a ) a__ :List[Any] = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ :Optional[Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ :Dict = RagRetriever.from_pretrained(a , **a ) a__ :Any = model_class.from_pretrained(a , retriever=a , **a ) model.retriever.init_retrieval() else: a__ :int = model_class.from_pretrained(a , **a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ :str = [] for line in tqdm(a ): questions.append(line.strip() ) if len(a ) == args.eval_batch_size: a__ :Union[str, Any] = evaluate_batch_fn(a , a , a ) preds_file.write("\n".join(a ) + "\n" ) preds_file.flush() a__ :Optional[Any] = [] if len(a ) > 0: a__ :List[str] = evaluate_batch_fn(a , a , a ) preds_file.write("\n".join(a ) ) preds_file.flush() score_fn(a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": snake_case__ = get_args() main(args)
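# --- Hedged sketch of the scoring rule above: a prediction is compared against
# every acceptable gold answer and the best score wins. exact_match below is a
# stand-in for the imported utils_rag implementation.
def exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())

def metric_max_over_gold(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)

assert metric_max_over_gold(exact_match, "Paris", ["paris", "Paris, France"]) == 1.0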
395
0
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
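# --- Hedged usage sketch: a three-node tree holding 3 coins, all at the root.
# One coin must travel to each leaf, so the minimum number of moves is 2.
tree = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(tree) == 2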
629
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _A ( lowerCamelCase , lowerCamelCase ): a__ : Dict = old_name if "patch_embed" in old_name: a__ , a__ , a__ : Union[str, Any] = old_name.split("." ) if layer == "0": a__ : Union[str, Any] = old_name.replace("0" , "convolution1" ) elif layer == "1": a__ : Dict = old_name.replace("1" , "batchnorm_before" ) elif layer == "3": a__ : List[str] = old_name.replace("3" , "convolution2" ) else: a__ : Optional[Any] = old_name.replace("4" , "batchnorm_after" ) if "network" in old_name and re.search(r"\d\.\d" , lowerCamelCase ): a__ : List[str] = r"\b\d{2}\b" if bool(re.search(lowerCamelCase , lowerCamelCase ) ): a__ : Optional[int] = re.search(r"\d\.\d\d." , lowerCamelCase ).group() else: a__ : Any = re.search(r"\d\.\d." , lowerCamelCase ).group() if int(match[0] ) < 6: a__ : List[Any] = old_name.replace(lowerCamelCase , "" ) a__ : int = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] ) a__ : List[Any] = "intermediate_stages." + trimmed_name else: a__ : Union[str, Any] = old_name.replace(lowerCamelCase , "" ) if int(match[2] ) < num_meta4D_last_stage: a__ : Optional[Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] ) else: a__ : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage ) a__ : str = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index ) if "norm1" in old_name: a__ : List[str] = trimmed_name.replace("norm1" , "layernorm1" ) elif "norm2" in old_name: a__ : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" ) elif "fc1" in old_name: a__ : List[str] = trimmed_name.replace("fc1" , "linear_in" ) elif "fc2" in old_name: a__ : Any = trimmed_name.replace("fc2" , "linear_out" ) a__ : Any = "last_stage." + trimmed_name elif "network" in old_name and re.search(r".\d." , lowerCamelCase ): a__ : List[str] = old_name.replace("network" , "intermediate_stages" ) if "fc" in new_name: a__ : str = new_name.replace("fc" , "convolution" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): a__ : str = new_name.replace("norm1" , "batchnorm_before" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): a__ : Any = new_name.replace("norm2" , "batchnorm_after" ) if "proj" in new_name: a__ : Optional[int] = new_name.replace("proj" , "projection" ) if "dist_head" in new_name: a__ : Tuple = new_name.replace("dist_head" , "distillation_classifier" ) elif "head" in new_name: a__ : Optional[int] = new_name.replace("head" , "classifier" ) elif "patch_embed" in new_name: a__ : Tuple = "efficientformer." + new_name elif new_name == "norm.weight" or new_name == "norm.bias": a__ : Union[str, Any] = new_name.replace("norm" , "layernorm" ) a__ : Optional[int] = "efficientformer." + new_name else: a__ : List[Any] = "efficientformer.encoder." 
+ new_name return new_name def _A ( lowerCamelCase , lowerCamelCase ): for key in checkpoint.copy().keys(): a__ : Optional[Any] = checkpoint.pop(lowerCamelCase ) a__ : Dict = val return checkpoint def _A ( ): a__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : List[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return image def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : List[str] = torch.load(lowerCamelCase , map_location="cpu" )["model"] a__ : str = EfficientFormerConfig.from_json_file(lowerCamelCase ) a__ : int = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase ) a__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] ) a__ : Tuple = config.depths[-1] - config.num_metaad_blocks + 1 a__ : Union[str, Any] = convert_torch_checkpoint(lowerCamelCase , lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() a__ : Dict = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } # prepare image a__ : str = prepare_img() a__ : Dict = 256 a__ : Union[str, Any] = 224 a__ : List[str] = EfficientFormerImageProcessor( size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , ) a__ : List[str] = processor(images=lowerCamelCase , return_tensors="pt" ).pixel_values # original processing pipeline a__ : List[str] = Compose( [ Resize(lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ), CenterCrop(lowerCamelCase ), ToTensor(), Normalize(lowerCamelCase , lowerCamelCase ), ] ) a__ : List[Any] = image_transforms(lowerCamelCase ).unsqueeze(0 ) assert torch.allclose(lowerCamelCase , lowerCamelCase ) a__ : Optional[int] = model(lowerCamelCase ) a__ : Any = outputs.logits a__ : Optional[Any] = (1, 1000) if "l1" in model_name: a__ : Tuple = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: a__ : int = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: a__ : Optional[Any] = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7""" ) # Save Checkpoints Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(lowerCamelCase ) print(F"""Processor successfully saved at {pytorch_dump_path}""" ) if push_to_hub: print("Pushing model to the hub..."
) model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=lowerCamelCase , ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
629
1
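A minimal usage sketch for the coin-distribution routine in the first sample above. The three-node tree is an illustrative assumption, not part of the original file.

# Root holds all three coins; one move pushes a coin to each empty leaf.
root = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(root))  # 2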
'''simple docstring''' import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class __a : def __init__( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple=14 ,lowerCamelCase : Optional[Any]=7 ,lowerCamelCase : str=True ,lowerCamelCase : List[str]=True ,lowerCamelCase : Dict=True ,lowerCamelCase : Any=True ,lowerCamelCase : int=True ,lowerCamelCase : Dict=99 ,lowerCamelCase : Dict=32 ,lowerCamelCase : Optional[Any]=5 ,lowerCamelCase : Tuple=4 ,lowerCamelCase : Optional[int]=37 ,lowerCamelCase : Optional[int]="gelu" ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Dict=512 ,lowerCamelCase : int=16 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : str=3 ,lowerCamelCase : Union[str, Any]=4 ,lowerCamelCase : Any=None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = use_mc_token_ids __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = self.vocab_size - 1 def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_mc_token_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def 
UpperCAmelCase__ ( self : int ): '''simple docstring''' return CTRLConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Any ,lowerCamelCase : List[str] ,lowerCamelCase : str ,*lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = CTRLModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() model(lowerCamelCase ,token_type_ids=lowerCamelCase ,head_mask=lowerCamelCase ) model(lowerCamelCase ,token_type_ids=lowerCamelCase ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer ) def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : int ,lowerCamelCase : Dict ,*lowerCamelCase : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = CTRLLMHeadModel(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __SCREAMING_SNAKE_CASE = model(lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : int ,lowerCamelCase : str ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,*lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = CTRLForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) @require_torch class __a ( _snake_case, _snake_case, _snake_case, unittest.TestCase ): __UpperCamelCase : str = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () __UpperCamelCase : Dict = (CTRLLMHeadModel,) if is_torch_available() else () __UpperCamelCase : int = ( { 'feature-extraction': CTRLModel, 'text-classification': CTRLForSequenceClassification, 'text-generation': CTRLLMHeadModel, 'zero-shot': CTRLForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase : List[str] = True __UpperCamelCase : Dict = False __UpperCamelCase : Tuple = False def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Any ,lowerCamelCase : str ,lowerCamelCase : List[str] ,lowerCamelCase : int ,lowerCamelCase : Dict ): '''simple docstring''' if pipeline_test_casse_name == 
"ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. return True return False def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = CTRLModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,n_embd=37 ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*lowerCamelCase ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCamelCase ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass @slow def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = CTRLModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' pass @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self : int ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.tensor( [[1_1859, 0, 1611, 8]] ,dtype=torch.long ,device=lowerCamelCase ) # Legal the president is __SCREAMING_SNAKE_CASE = [ 1_1859, 0, 1611, 8, 5, 150, 2_6449, 2, 19, 348, 469, 3, 2595, 48, 2_0740, 24_6533, 24_6533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,do_sample=lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() ,lowerCamelCase )
109
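The CTRL tester above fabricates inputs with ids_tensor and random_attention_mask. A hedged torch equivalent of the ids helper (illustrative, not the test utility's real implementation):

import torch

def ids_tensor(shape, vocab_size):
    # Random token ids in [0, vocab_size), the way the tester builds inputs.
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

print(ids_tensor((2, 7), vocab_size=99).shape)  # torch.Size([2, 7])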
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging a = logging.get_logger(__name__) def __magic_name__ ( __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Dict: '''simple docstring''' return field(default_factory=lambda: default , metadata=__UpperCAmelCase ) @dataclass class __a : __UpperCamelCase : List[str] = list_field( default=[], metadata={ 'help': ( 'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version' ' of all available models' ) }, ) __UpperCamelCase : List[int] = list_field( default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512], metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} ) __UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Use FP16 to accelerate inference.'} ) __UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Benchmark training of model'} ) __UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Verbose memory tracing'} ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory' }, ) __UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Trace memory line by line'} ) __UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Save result to a CSV file'} ) __UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Save all print statements in a log file'} ) __UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Whether to print environment information'} ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use' ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled' ' for debugging / testing and on TPU.' 
) }, ) __UpperCamelCase : str = field( default=F'inference_time_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving time results to csv.'}, ) __UpperCamelCase : str = field( default=F'inference_memory_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving memory results to csv.'}, ) __UpperCamelCase : str = field( default=F'train_time_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving time results to csv for training.'}, ) __UpperCamelCase : str = field( default=F'train_memory_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving memory results to csv for training.'}, ) __UpperCamelCase : str = field( default=F'env_info_{round(time() )}.csv', metadata={'help': 'CSV filename used if saving environment information.'}, ) __UpperCamelCase : str = field( default=F'log_{round(time() )}.csv', metadata={'help': 'Log filename used if print statements are saved in log.'}, ) __UpperCamelCase : int = field(default=3, metadata={'help': 'Times an experiment will be run.'} ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain' ' model weights.' ) }, ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' warnings.warn( f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" ,lowerCamelCase ,) def UpperCAmelCase__ ( self : int ): '''simple docstring''' return json.dumps(dataclasses.asdict(self ) ,indent=2 ) @property def UpperCAmelCase__ ( self : str ): '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
109
1
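The benchmark-arguments sample above routes every list-valued default through a small field(default_factory=...) wrapper (the renaming pass garbled the wrapper's parameter names). A coherent sketch of that pattern with illustrative names; note the factory returns the same underlying list on every call, so instances share it:

from dataclasses import dataclass, field

def list_field(default=None, metadata=None):
    # dataclasses reject mutable defaults; default_factory is the sanctioned route.
    return field(default_factory=lambda: default, metadata=metadata)

@dataclass
class ToyArgs:
    batch_sizes: list = list_field(default=[8], metadata={"help": "toy batch sizes"})

print(ToyArgs().batch_sizes)  # [8]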
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if the given number is prime, using 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
719
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: _lowerCAmelCase : List[str] = None _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : Union[str, Any] = { "vocab_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json" ), }, } _lowerCAmelCase : Union[str, Any] = { "moussaKam/mbarthez": 1_0_2_4, "moussaKam/barthez": 1_0_2_4, "moussaKam/barthez-orangesum-title": 1_0_2_4, } _lowerCAmelCase : int = "▁" class __snake_case ( SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask'] SCREAMING_SNAKE_CASE__ = BarthezTokenizer def __init__( self ,a_=None ,a_=None ,a_="<s>" ,a_="</s>" ,a_="</s>" ,a_="<s>" ,a_="<unk>" ,a_="<pad>" ,a_="<mask>" ,**a_ ,): """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase__ = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else mask_token super().__init__( a_ ,tokenizer_file=a_ ,bos_token=a_ ,eos_token=a_ ,unk_token=a_ ,sep_token=a_ ,cls_token=a_ ,pad_token=a_ ,mask_token=a_ ,**a_ ,) lowerCAmelCase__ = vocab_file lowerCAmelCase__ = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase__ = [self.cls_token_id] lowerCAmelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ = None ): """simple docstring""" lowerCAmelCase__ = [self.sep_token_id] lowerCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(a_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase__ = os.path.join( a_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file ,a_ ) return (out_vocab_file,)
604
0
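A quick spot check of the 6k +/- 1 trial-division test above, assuming is_prime from that sample is in scope:

print([n for n in range(2, 30) if is_prime(n)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]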
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : Tuple , UpperCAmelCase_ : List[Any] , ) ->str: '''simple docstring''' lowerCamelCase__: Union[str, Any] =parent lowerCamelCase__: Optional[Any] =13 lowerCamelCase__: List[str] =7 lowerCamelCase__: Any =True lowerCamelCase__: List[Any] =True lowerCamelCase__: Optional[int] =False lowerCamelCase__: int =True lowerCamelCase__: Optional[int] =99 lowerCamelCase__: Any =32 lowerCamelCase__: Tuple =2 lowerCamelCase__: Union[str, Any] =4 lowerCamelCase__: Union[str, Any] =37 lowerCamelCase__: List[Any] ="gelu" lowerCamelCase__: Optional[int] =0.1 lowerCamelCase__: Optional[Any] =0.1 lowerCamelCase__: Optional[Any] =512 lowerCamelCase__: str =16 lowerCamelCase__: int =2 lowerCamelCase__: List[Any] =0.02 lowerCamelCase__: Optional[int] =3 lowerCamelCase__: Dict =4 lowerCamelCase__: Tuple =None def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]: '''simple docstring''' lowerCamelCase__: List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowerCamelCase__: List[Any] =None if self.use_input_mask: lowerCamelCase__: List[str] =random_attention_mask([self.batch_size, self.seq_length]) lowerCamelCase__: Optional[int] =None lowerCamelCase__: Optional[int] =None lowerCamelCase__: int =None if self.use_labels: lowerCamelCase__: List[str] =ids_tensor([self.batch_size] , self.type_sequence_label_size) lowerCamelCase__: List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowerCamelCase__: Optional[int] =ids_tensor([self.batch_size] , self.num_choices) lowerCamelCase__: str =DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any) ->Dict: '''simple docstring''' lowerCamelCase__: List[Any] =TFDistilBertModel(config=UpperCAmelCase_) lowerCamelCase__: Optional[int] ={"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase__: List[str] =model(UpperCAmelCase_) lowerCamelCase__: List[Any] =[input_ids, input_mask] lowerCamelCase__: Any =model(UpperCAmelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : 
Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase_) lowerCamelCase__: Dict ={"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase__: List[Any] =model(UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple) ->Tuple: '''simple docstring''' lowerCamelCase__: List[str] =TFDistilBertForQuestionAnswering(config=UpperCAmelCase_) lowerCamelCase__: Optional[Any] ={ "input_ids": input_ids, "attention_mask": input_mask, } lowerCamelCase__: int =model(UpperCAmelCase_) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict) ->int: '''simple docstring''' lowerCamelCase__: Optional[int] =self.num_labels lowerCamelCase__: Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase_) lowerCamelCase__: Optional[int] ={"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Dict =self.num_choices lowerCamelCase__: Any =TFDistilBertForMultipleChoice(UpperCAmelCase_) lowerCamelCase__: Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase_ , 1) , (1, self.num_choices, 1)) lowerCamelCase__: Tuple =tf.tile(tf.expand_dims(UpperCAmelCase_ , 1) , (1, self.num_choices, 1)) lowerCamelCase__: Dict ={ "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } lowerCamelCase__: Tuple =model(UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]) ->str: '''simple docstring''' lowerCamelCase__: Union[str, Any] =self.num_labels lowerCamelCase__: List[str] =TFDistilBertForTokenClassification(UpperCAmelCase_) lowerCamelCase__: str ={"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase__: Optional[int] =model(UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: List[Any] =self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)): int =config_and_inputs lowerCamelCase__: Optional[int] ={"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict 
@require_tf class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) lowercase_ = ( { "feature-extraction": TFDistilBertModel, "fill-mask": TFDistilBertForMaskedLM, "question-answering": TFDistilBertForQuestionAnswering, "text-classification": TFDistilBertForSequenceClassification, "token-classification": TFDistilBertForTokenClassification, "zero-shot": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) lowercase_ = False lowercase_ = False def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =TFDistilBertModelTester(self) lowerCamelCase__: Dict =ConfigTester(self , config_class=UpperCAmelCase_ , dim=37) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Tuple: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ (self : Any) ->int: '''simple docstring''' lowerCamelCase__: Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict: '''simple docstring''' lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->int: '''simple docstring''' lowerCamelCase__: Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[int]: '''simple docstring''' lowerCamelCase__: int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase_) @slow def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict: '''simple docstring''' for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]): lowerCamelCase__: str =TFDistilBertModel.from_pretrained(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict: '''simple docstring''' lowerCamelCase__: List[str] =TFDistilBertModel.from_pretrained("distilbert-base-uncased") lowerCamelCase__: Any =tf.constant([[0, 1, 2, 3, 4, 5]]) lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_)[0] lowerCamelCase__: Optional[int] =[1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase_) lowerCamelCase__: List[Any] =tf.constant( [ [ [0.1926_1885, -0.1373_2955, 0.411_9799], [0.2215_0156, -0.0742_2661, 0.3903_7204], [0.2275_6018, -0.089_6414, 0.370_1467], ] ]) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4)
59
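The multiple-choice test above repeats each sequence once per answer choice with expand_dims plus tile. A toy shape check of that trick:

import tensorflow as tf

input_ids = tf.constant([[1, 2, 3], [4, 5, 6]])  # (batch=2, seq=3)
num_choices = 4
tiled = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
print(tiled.shape)  # (2, 4, 3)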
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Optional[Any] = logging.get_logger(__name__) def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ) ->int: _UpperCAmelCase =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" _UpperCAmelCase =[(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) ->List[Any]: for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase ="" else: _UpperCAmelCase ="deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase =state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) _UpperCAmelCase =state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase =in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase =in_proj_bias[: config.hidden_size] _UpperCAmelCase =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase =in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase =in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Dict: _UpperCAmelCase =dct.pop(_lowerCamelCase ) _UpperCAmelCase =val def lowerCamelCase__ ( ) ->int: _UpperCAmelCase ="http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase =Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->List[str]: _UpperCAmelCase =DeiTConfig() # all deit models have fine-tuned heads _UpperCAmelCase =False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size _UpperCAmelCase =1000 _UpperCAmelCase ="huggingface/label-files" _UpperCAmelCase ="imagenet-1k-id2label.json" _UpperCAmelCase =json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _UpperCAmelCase ={int(_lowerCamelCase ): v for k, v in idalabel.items()} _UpperCAmelCase =idalabel _UpperCAmelCase ={v: k for k, v in idalabel.items()} _UpperCAmelCase =int(deit_name[-6:-4] ) _UpperCAmelCase =int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): _UpperCAmelCase =192 _UpperCAmelCase =768 _UpperCAmelCase =12 _UpperCAmelCase =3 elif deit_name[9:].startswith("small" ): _UpperCAmelCase =384 _UpperCAmelCase =1536 _UpperCAmelCase =12 _UpperCAmelCase =6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): _UpperCAmelCase =1024 _UpperCAmelCase =4096 _UpperCAmelCase =24 _UpperCAmelCase =16 # load original model from timm _UpperCAmelCase =timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase =timm_model.state_dict() _UpperCAmelCase =create_rename_keys(_lowerCamelCase , _lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # load HuggingFace model _UpperCAmelCase =DeiTForImageClassificationWithTeacher(_lowerCamelCase ).eval() model.load_state_dict(_lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor _UpperCAmelCase =int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 _UpperCAmelCase =DeiTImageProcessor(size=_lowerCamelCase , crop_size=config.image_size ) _UpperCAmelCase =image_processor(images=prepare_img() , return_tensors="pt" ) _UpperCAmelCase =encoding["pixel_values"] _UpperCAmelCase =model(_lowerCamelCase ) _UpperCAmelCase =timm_model(_lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F"Saving model {deit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCamelCase ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) snake_case__ : List[str] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
408
0
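The DeiT conversion above splits timm's fused qkv projection into separate query/key/value tensors by row slicing. A self-contained sketch of that slicing with an illustrative hidden size (real DeiT widths are 192/384/768/1024):

import torch

hidden_size = 4  # illustrative only
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv, as in timm

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]

# The three slices partition the fused matrix exactly, in q/k/v order.
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)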
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase: Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = XGLMTokenizer SCREAMING_SNAKE_CASE_ : str = XGLMTokenizerFast SCREAMING_SNAKE_CASE_ : Union[str, Any] = True SCREAMING_SNAKE_CASE_ : Optional[int] = True def lowerCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowercase : List[Any] = XGLMTokenizer(lowerCAmelCase_ ,keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : int = """<pad>""" _lowercase : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) ,lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) ,lowerCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<s>""" ) self.assertEqual(vocab_keys[1] ,"""<pad>""" ) self.assertEqual(len(lowerCAmelCase_ ) ,10_08 ) def lowerCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,10_08 ) def lowerCamelCase__ ( self ): _lowercase : Tuple = XGLMTokenizer(lowerCAmelCase_ ,keep_accents=lowerCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,) _lowercase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) _lowercase : int = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) _lowercase : Tuple = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) @cached_property def lowerCamelCase__ ( self ): return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) def lowerCamelCase__ ( self ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCAmelCase_ ,f.name ) _lowercase : Union[str, Any] = 
XGLMTokenizer(f.name ,keep_accents=lowerCAmelCase_ ) _lowercase : Optional[Any] = pickle.dumps(lowerCAmelCase_ ) pickle.loads(lowerCAmelCase_ ) def lowerCamelCase__ ( self ): if not self.test_rust_tokenizer: return _lowercase : int = self.get_tokenizer() _lowercase : List[str] = self.get_rust_tokenizer() _lowercase : List[str] = """I was born in 92000, and this is falsé.""" _lowercase : Any = tokenizer.tokenize(lowerCAmelCase_ ) _lowercase : List[Any] = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ ,lowerCAmelCase_ ) _lowercase : List[Any] = tokenizer.encode(lowerCAmelCase_ ,add_special_tokens=lowerCAmelCase_ ) _lowercase : Dict = rust_tokenizer.encode(lowerCAmelCase_ ,add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ ,lowerCAmelCase_ ) _lowercase : int = self.get_rust_tokenizer() _lowercase : Dict = tokenizer.encode(lowerCAmelCase_ ) _lowercase : str = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ ,lowerCAmelCase_ ) @slow def lowerCamelCase__ ( self ): _lowercase : List[str] = """Hello World!""" _lowercase : str = [2, 3_12_27, 44_47, 35] self.assertListEqual(lowerCAmelCase_ ,self.big_tokenizer.encode(lowerCAmelCase_ ) ) @slow def lowerCamelCase__ ( self ): _lowercase : Any = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off _lowercase : Union[str, Any] = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35] # fmt: on self.assertListEqual(lowerCAmelCase_ ,self.big_tokenizer.encode(lowerCAmelCase_ ) ) @slow def lowerCamelCase__ ( self ): _lowercase : List[Any] = { """input_ids""": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( 
expected_encoding=lowerCAmelCase_ ,model_name="""facebook/xglm-564M""" ,padding=lowerCAmelCase_ ,)
712
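A compact illustration of the tokenize / convert_tokens_to_ids / convert_ids_to_tokens round trip the XGLM tests exercise, using a toy vocabulary instead of the real sentencepiece model:

vocab = {"<s>": 0, "<pad>": 1, "▁This": 2, "▁is": 3, "▁a": 4, "▁t": 5, "est": 6}
inv_vocab = {i: t for t, i in vocab.items()}

tokens = ["▁This", "▁is", "▁a", "▁t", "est"]
ids = [vocab[t] for t in tokens]
assert [inv_vocab[i] for i in ids] == tokens
print(ids)  # [2, 3, 4, 5, 6]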
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
600
0
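The Kandinsky __init__ above follows the library's optional-dependency pattern: attempt the heavy imports, and fall back to dummy objects that raise only when used. A generic sketch with placeholder names (heavy_feature is hypothetical):

try:
    import torch  # noqa: F401

    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:

    def heavy_feature():
        return "real implementation"

else:

    def heavy_feature():
        # Dummy stand-in, mirroring the dummy_torch_and_transformers_objects idea.
        raise ImportError("heavy_feature requires torch; install it first")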
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
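# Hedged usage sketch, not part of the original script: the converter is meant to be
# invoked from the command line with the argparse flags defined above. The file name
# and output folder below are assumptions for illustration only.
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted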
import os


def largest_product(grid) -> int:
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution() -> int:
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
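# Illustrative check, not from the original file: on a small 4x4 grid the largest
# four-in-a-row product comes from the bottom row, 13 * 14 * 15 * 16 == 43680.
#
#   >>> largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#   43680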
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _lowerCAmelCase : int = get_tests_dir("fixtures") _lowerCAmelCase : List[Any] = get_tests_dir("fixtures/dummy_feature_extractor_config.json") _lowerCAmelCase : str = get_tests_dir("fixtures/dummy-config.json") class _UpperCamelCase ( unittest.TestCase ): def UpperCAmelCase_ ( self :int ) -> Dict: UpperCAmelCase__ = 0 def UpperCAmelCase_ ( self :Union[str, Any] ) -> str: UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__a , __a ) def UpperCAmelCase_ ( self :Tuple ) -> List[str]: UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCAmelCase_ ( self :List[Any] ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(__a ).to_dict() config_dict.pop("feature_extractor_type" ) UpperCAmelCase__ = WavaVecaFeatureExtractor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(__a ) # make sure private variable is not incorrectly saved UpperCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def UpperCAmelCase_ ( self :List[str] ) -> str: UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCAmelCase_ ( self :Any ) -> Tuple: with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained("bert-base" ) def UpperCAmelCase_ ( self :Optional[Any] ) -> int: with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" ) def UpperCAmelCase_ ( self :List[str] ) -> Tuple: with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def UpperCAmelCase_ ( self :Union[str, Any] ) -> List[Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(__a ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def UpperCAmelCase_ ( self :int ) -> Optional[Any]: try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoFeatureExtractor.register(__a , __a ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ = CustomFeatureExtractor.from_pretrained(__a ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def UpperCAmelCase_ ( self :int ) -> int: class _UpperCamelCase ( __lowercase ): UpperCAmelCase_ = True try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # If remote code is not set, the default is to use local UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : str = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } _lowerCAmelCase : List[Any] = { "allenai/led-base-16384": 1_6_3_8_4, } class _UpperCamelCase ( lowerCAmelCase ): UpperCAmelCase_ = VOCAB_FILES_NAMES UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ = LEDTokenizer UpperCAmelCase_ = ["""input_ids""", """attention_mask"""] def __init__( self :Dict , lowerCamelCase :Any=None , lowerCamelCase :Dict=None , lowerCamelCase :Dict=None , lowerCamelCase :int="replace" , lowerCamelCase :List[Any]="<s>" , lowerCamelCase :Optional[Any]="</s>" , lowerCamelCase :Optional[Any]="</s>" , lowerCamelCase :int="<s>" , lowerCamelCase :Optional[Any]="<unk>" , lowerCamelCase :Dict="<pad>" , lowerCamelCase :Tuple="<mask>" , lowerCamelCase :str=False , lowerCamelCase :Union[str, Any]=True , **lowerCamelCase :List[str] , ) -> Tuple: super().__init__( lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase , **lowerCamelCase , ) UpperCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space: UpperCAmelCase__ = getattr(lowerCamelCase , pre_tok_state.pop("type" ) ) UpperCAmelCase__ = add_prefix_space UpperCAmelCase__ = pre_tok_class(**lowerCamelCase ) UpperCAmelCase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase__ = "post_processor" UpperCAmelCase__ = getattr(self.backend_tokenizer , lowerCamelCase , lowerCamelCase ) if tokenizer_component_instance: UpperCAmelCase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase__ = tuple(state["sep"] ) if "cls" in state: UpperCAmelCase__ = tuple(state["cls"] ) UpperCAmelCase__ = False if state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space: UpperCAmelCase__ = add_prefix_space UpperCAmelCase__ = True if state.get("trim_offsets" , lowerCamelCase ) != trim_offsets: UpperCAmelCase__ = trim_offsets UpperCAmelCase__ = True if changes_to_apply: UpperCAmelCase__ = getattr(lowerCamelCase , state.pop("type" ) ) UpperCAmelCase__ = component_class(**lowerCamelCase ) setattr(self.backend_tokenizer , lowerCamelCase , lowerCamelCase ) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def UpperCAmelCase_ ( self :List[str] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase_ ( self :Union[str, Any] , lowerCamelCase :List[str] ) -> Dict: UpperCAmelCase__ = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else value UpperCAmelCase__ = value def UpperCAmelCase_ ( self :Optional[Any] , *lowerCamelCase :Any , **lowerCamelCase :int ) -> BatchEncoding: UpperCAmelCase__ = kwargs.get("is_split_into_words" , lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase ) def UpperCAmelCase_ ( self :int , *lowerCamelCase :Optional[Any] , **lowerCamelCase :Optional[int] ) -> BatchEncoding: UpperCAmelCase__ = kwargs.get("is_split_into_words" , lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase , **lowerCamelCase ) def UpperCAmelCase_ ( self :Any , lowerCamelCase :str , lowerCamelCase :Optional[str] = None ) -> Tuple[str]: UpperCAmelCase__ = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase ) return tuple(lowerCamelCase ) def UpperCAmelCase_ ( self :Optional[Any] , lowerCamelCase :Optional[int] , lowerCamelCase :Optional[Any]=None ) -> Dict: UpperCAmelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :List[int] , lowerCamelCase :Optional[List[int]] = None ) -> List[int]: UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase :Optional[int] = None , lowerCamelCase :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase :Optional[int] = None , lowerCamelCase :Optional[bool] = None , ) -> dict: UpperCAmelCase__ = super()._pad( encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: UpperCAmelCase__ = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase__ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
UpperCAmelCase__ = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase ) if needs_to_be_padded: UpperCAmelCase__ = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase__ = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase__ = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class __snake_case ( _lowercase): snake_case__ : int = "donut-swin" snake_case__ : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Optional[int] , __lowerCAmelCase : str=2_2_4 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : List[Any]=9_6 , __lowerCAmelCase : int=[2, 2, 6, 2] , __lowerCAmelCase : List[Any]=[3, 6, 1_2, 2_4] , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : List[Any]=4.0 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : List[str]=1E-5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase ) _lowerCamelCase : Any = image_size _lowerCamelCase : List[str] = patch_size _lowerCamelCase : Optional[Any] = num_channels _lowerCamelCase : Any = embed_dim _lowerCamelCase : List[Any] = depths _lowerCamelCase : Tuple = len(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = num_heads _lowerCamelCase : Optional[Any] = window_size _lowerCamelCase : List[str] = mlp_ratio _lowerCamelCase : List[str] = qkv_bias _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : str = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = drop_path_rate _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Dict = use_absolute_embeddings _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : List[str] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCamelCase : List[str] = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) )
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''', [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ], ) def snake_case_ ( A_ : Dict, A_ : List[str] ): '''simple docstring''' _lowerCamelCase : int = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''', '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''', '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f: f.write('''{"default": {"dataset_size": 42}}''' ) _lowerCamelCase : str = DatasetInfosDict.from_directory(A_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( '''dataset_info''', [ DatasetInfo(), DatasetInfo( description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ), ], ) def snake_case_ ( A_ : str, A_ : DatasetInfo ): '''simple docstring''' _lowerCamelCase : Optional[Any] = str(A_ ) dataset_info.write_to_directory(A_ ) _lowerCamelCase : str = DatasetInfo.from_directory(A_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(A_, '''dataset_info.json''' ) ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = DatasetInfo( description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, ) _lowerCamelCase : Optional[Any] = dataset_info._to_yaml_dict() assert sorted(A_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) ) _lowerCamelCase : str = yaml.safe_dump(A_ ) _lowerCamelCase : Tuple = yaml.safe_load(A_ ) assert dataset_info_yaml_dict == reloaded def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : int = DatasetInfo() _lowerCamelCase : Dict = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''', [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=42 ), '''v2''': DatasetInfo(dataset_size=13_37 ), } ), ], ) def snake_case_ ( A_ : Optional[Any], A_ : DatasetInfosDict ): '''simple docstring''' _lowerCamelCase : List[str] = str(A_ ) 
dataset_infos_dict.write_to_directory(A_ ) _lowerCamelCase : List[Any] = DatasetInfosDict.from_directory(A_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _lowerCamelCase : str = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _lowerCamelCase : Any = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(A_, '''README.md''' ) )
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowercase_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase ( lowerCAmelCase__ : Union[List, PIL.Image.Image, torch.Tensor] ) -> int: warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , lowerCAmelCase__ , ) if isinstance(lowerCAmelCase__ , torch.Tensor ): return image elif isinstance(lowerCAmelCase__ , PIL.Image.Image ): __a = [image] if isinstance(image[0] , PIL.Image.Image ): __a , __a = image[0].size __a , __a = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 __a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] __a = np.concatenate(lowerCAmelCase__ , axis=0 ) __a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0 __a = image.transpose(0 , 3 , 1 , 2 ) __a = 2.0 * image - 1.0 __a = torch.from_numpy(lowerCAmelCase__ ) elif isinstance(image[0] , torch.Tensor ): __a = torch.cat(lowerCAmelCase__ , dim=0 ) return image def lowercase ( lowerCAmelCase__ : Union[List, PIL.Image.Image, torch.Tensor] ) -> List[str]: if isinstance(lowerCAmelCase__ , torch.Tensor ): return mask elif isinstance(lowerCAmelCase__ , PIL.Image.Image ): __a = [mask] if isinstance(mask[0] , PIL.Image.Image ): __a , __a = mask[0].size __a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] __a = np.concatenate(lowerCAmelCase__ , axis=0 ) __a = mask.astype(np.floataa ) / 2_55.0 __a = 0 __a = 1 __a = torch.from_numpy(lowerCAmelCase__ ) elif isinstance(mask[0] , torch.Tensor ): __a = torch.cat(lowerCAmelCase__ , dim=0 ) return mask class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : Optional[int] = 4_2 __UpperCAmelCase : str = 4_2 def __init__( self , _a , _a ): super().__init__() self.register_modules(unet=_a , scheduler=_a ) @torch.no_grad() def __call__( self , _a , _a , _a = 250 , _a = 0.0 , _a = 10 , _a = 10 , _a = None , _a = "pil" , _a = True , ): __a = image __a = _preprocess_image(_a ) __a = original_image.to(device=self.device , dtype=self.unet.dtype ) __a = _preprocess_mask(_a ) __a = mask_image.to(device=self.device , dtype=self.unet.dtype ) __a = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(_a , _a ) and len(_a ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(_a )}, but requested an effective batch''' f''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''' ) __a = original_image.shape __a = randn_tensor(_a , generator=_a , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(_a , _a , _a , self.device ) __a = eta __a = self.scheduler.timesteps[0] + 1 __a = generator[0] if isinstance(_a , _a ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual __a = self.unet(_a , _a ).sample # compute previous image: x_t -> x_t-1 __a = self.scheduler.step(_a , _a , _a , _a , _a , _a ).prev_sample else: # compute the reverse: x_t-1 -> x_t __a = self.scheduler.undo_step(_a , _a , _a ) __a = t __a = (image / 2 + 0.5).clamp(0 , 1 ) __a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
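# Usage sketch (hedged): RePaint inpaints with an unconditional DDPM UNet. The
# checkpoint name below follows the diffusers documentation example and is an
# assumption here; `original_image` and `mask` are same-sized PIL images.
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   result = pipe(image=original_image, mask_image=mask, num_inference_steps=250).images[0]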
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ) -> str: assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Tuple: __a = tmp_path / '''cache''' __a = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read() _check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]: __a = tmp_path / '''cache''' __a = {'''text''': '''string'''} __a = features.copy() if features else default_expected_features __a = ( Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None ) __a = TextDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read() _check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> Optional[Any]: __a = tmp_path / '''cache''' __a = {'''text''': '''string'''} __a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read() _check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict: if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ): __a = text_path elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ): __a = [text_path] __a = tmp_path / '''cache''' __a = {'''text''': '''string'''} __a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read() _check_text_dataset(lowerCAmelCase__ , lowerCAmelCase__ ) def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=("train",) ) -> Optional[Any]: assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for split in splits: __a = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]: __a = tmp_path / '''cache''' __a = {'''text''': '''string'''} with 
assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __a = TextDatasetReader({'''train''': text_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read() _check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> str: __a = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __a = {'''text''': '''string'''} __a = features.copy() if features else default_expected_features __a = ( Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None ) __a = TextDatasetReader({'''train''': text_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read() _check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> Dict: if split: __a = {split: text_path} else: __a = '''train''' __a = {'''train''': text_path, '''test''': text_path} __a = tmp_path / '''cache''' __a = {'''text''': '''string'''} __a = TextDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read() _check_text_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __lowercase (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = tempfile.mkdtemp() # fmt: off SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on SCREAMING_SNAKE_CASE_ : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase__ ) ) SCREAMING_SNAKE_CASE_ : List[Any] = { 'do_resize': True, 'size': 2_0, 'do_center_crop': True, 'crop_size': 1_8, 'do_normalize': True, 'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073], 'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711], } SCREAMING_SNAKE_CASE_ : Dict = os.path.join(self.tmpdirname , lowerCAmelCase__ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCamelCase__ ( self , **lowerCAmelCase__ ): """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def UpperCamelCase__ ( self , **lowerCAmelCase__ ): """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def UpperCamelCase__ ( self , **lowerCAmelCase__ ): """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] SCREAMING_SNAKE_CASE_ : str = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : int = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE_ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE_ : Union[str, 
Any] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ ) self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ ) self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 ) SCREAMING_SNAKE_CASE_ : List[str] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Any = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(lowerCAmelCase__ , return_tensors='np' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(images=lowerCAmelCase__ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = self.get_image_processor() SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : str = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer' SCREAMING_SNAKE_CASE_ : Any = processor(text=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(lowerCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Any = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no 
input is passed with pytest.raises(lowerCAmelCase__ ): processor() def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : List[Any] = processor.batch_decode(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.batch_decode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : str = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : int = 'lower newer' SCREAMING_SNAKE_CASE_ : str = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Tuple = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
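# Hedged note, not part of the original file: test modules like this one are usually
# run with pytest from the repository root; the path below is hypothetical.
#
#   python -m pytest tests/models/ctrl/test_tokenization_ctrl.py -q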
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class __lowercase : def __init__(self , A , ): lowerCamelCase_ : Optional[int] = parent lowerCamelCase_ : List[Any] = 1_3 lowerCamelCase_ : Optional[int] = 7 lowerCamelCase_ : Any = True lowerCamelCase_ : Tuple = True lowerCamelCase_ : List[Any] = False lowerCamelCase_ : Any = True lowerCamelCase_ : Optional[int] = 9_9 lowerCamelCase_ : int = 3_2 lowerCamelCase_ : Dict = 2 lowerCamelCase_ : Dict = 4 lowerCamelCase_ : Optional[int] = 3_7 lowerCamelCase_ : Tuple = '''gelu''' lowerCamelCase_ : Optional[Any] = 0.1 lowerCamelCase_ : Dict = 0.1 lowerCamelCase_ : str = 5_1_2 lowerCamelCase_ : List[Any] = 1_6 lowerCamelCase_ : Dict = 2 lowerCamelCase_ : List[str] = 0.02 lowerCamelCase_ : int = 3 lowerCamelCase_ : Optional[int] = 4 lowerCamelCase_ : Tuple = None def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ : int = None if self.use_input_mask: lowerCamelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ : Tuple = None lowerCamelCase_ : str = None lowerCamelCase_ : List[Any] = None if self.use_labels: lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ : Any = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ (self , A , A , A , A , A , A ): lowerCamelCase_ : Tuple = TFDistilBertModel(config=UpperCAmelCase__ ) lowerCamelCase_ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase_ : Union[str, Any] = model(UpperCAmelCase__ ) lowerCamelCase_ : Tuple = [input_ids, input_mask] lowerCamelCase_ : int = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ (self , A , A , A , A , A , A ): lowerCamelCase_ : Optional[int] = TFDistilBertForMaskedLM(config=UpperCAmelCase__ ) lowerCamelCase_ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase_ : Union[str, Any] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ (self , A , A , A , A , A , 
A ): lowerCamelCase_ : Union[str, Any] = TFDistilBertForQuestionAnswering(config=UpperCAmelCase__ ) lowerCamelCase_ : List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, } lowerCamelCase_ : Tuple = model(UpperCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ (self , A , A , A , A , A , A ): lowerCamelCase_ : Any = self.num_labels lowerCamelCase_ : Optional[int] = TFDistilBertForSequenceClassification(UpperCAmelCase__ ) lowerCamelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase_ : List[Any] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ (self , A , A , A , A , A , A ): lowerCamelCase_ : Dict = self.num_choices lowerCamelCase_ : Tuple = TFDistilBertForMultipleChoice(UpperCAmelCase__ ) lowerCamelCase_ : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase_ : str = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase_ : Tuple = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, } lowerCamelCase_ : Optional[Any] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ (self , A , A , A , A , A , A ): lowerCamelCase_ : Optional[Any] = self.num_labels lowerCamelCase_ : Any = TFDistilBertForTokenClassification(UpperCAmelCase__ ) lowerCamelCase_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase_ : List[Any] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ (self ): lowerCamelCase_ : List[str] = self.prepare_config_and_inputs() (lowerCamelCase_) : Union[str, Any] = config_and_inputs lowerCamelCase_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __lowercase ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase : str = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) lowerCamelCase : int = ( { 'feature-extraction': TFDistilBertModel, 'fill-mask': TFDistilBertForMaskedLM, 'question-answering': TFDistilBertForQuestionAnswering, 'text-classification': TFDistilBertForSequenceClassification, 'token-classification': TFDistilBertForTokenClassification, 'zero-shot': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase : Optional[Any] = False lowerCamelCase : int = False def UpperCAmelCase__ (self ): lowerCamelCase_ : List[Any] = TFDistilBertModelTester(self ) lowerCamelCase_ : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase__ , dim=3_7 ) def UpperCAmelCase__ (self ): self.config_tester.run_common_tests() def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase__ ) def UpperCAmelCase__ (self ): lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase__ ) 
def UpperCAmelCase__ (self ): lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase__ ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase__ ) def UpperCAmelCase__ (self ): lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase__ ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase__ (self ): for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): lowerCamelCase_ : str = TFDistilBertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_tf class __lowercase ( unittest.TestCase ): @slow def UpperCAmelCase__ (self ): lowerCamelCase_ : Union[str, Any] = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) lowerCamelCase_ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ : Tuple = model(UpperCAmelCase__ )[0] lowerCamelCase_ : List[str] = [1, 6, 7_6_8] self.assertEqual(output.shape , UpperCAmelCase__ ) lowerCamelCase_ : Dict = tf.constant( [ [ [0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99], [0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04], [0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
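# Usage sketch (hedged): with sentencepiece installed, the lazy module resolves
# MLukeTokenizer on first attribute access; the checkpoint name below is the
# public MLuke base model and is an assumption for illustration.
#
#   from transformers import MLukeTokenizer
#   tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")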
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
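# Usage sketch (file names are hypothetical): the script reads a benchmark JSON of
# the form {benchmark: {metric: {"new": ..., "old": ..., "diff": ...}}} and writes a
# collapsible Markdown report.
#
#   python format_json_to_md.py benchmark_results.json benchmark_report.md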
331
0
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
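A brief, hedged usage sketch for the tokenizer above; it assumes the public transformers package and the allenai/led-base-16384 checkpoint are available:

from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer("Global attention goes on the first token.")
# Mark global attention (1) on <s>; tokenizer.pad() extends this mask with -1, per _pad above
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
padded = tokenizer.pad(enc, padding="max_length", max_length=32)
print(padded["global_attention_mask"])  # trailing positions padded with -1 (local attention)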
148
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
148
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
48
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
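A hedged usage sketch for the custom pipeline above. The class name follows Hugging Face's documented custom-pipeline example; the task name and checkpoint below are placeholders of our choosing, and registration uses the standard PIPELINE_REGISTRY API:

from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers.pipelines import PIPELINE_REGISTRY

# Register under a task name so `pipeline("pair-classification", ...)` can construct it
PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
)

model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
pair_clf = PairClassificationPipeline(model=model, tokenizer=tokenizer)
print(pair_clf("I love you", second_text="I like you"))  # {"label": ..., "score": ..., "logits": [...]}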
521
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
240
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        # k is an empirically determined constant in [0.04, 0.06];
        # window_size is the size of the neighbourhood considered
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
240
1
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product, and return that product."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
27
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
35
0
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
351
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
351
1
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __a ( __magic_name__ ): """simple docstring""" @require_torch def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " lowerCAmelCase__ : List[str] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " lowerCAmelCase__ : List[str] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache lowerCAmelCase__ : Dict = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(snake_case ) BertModel.from_pretrained(snake_case ) BertTokenizer.from_pretrained(snake_case ) pipeline(task="fill-mask" , model=snake_case ) # baseline - just load from_pretrained with normal network lowerCAmelCase__ : Any = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed lowerCAmelCase__ : str = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ : Any = "1" lowerCAmelCase__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " lowerCAmelCase__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " lowerCAmelCase__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache lowerCAmelCase__ : str = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(snake_case ) BertModel.from_pretrained(snake_case ) BertTokenizer.from_pretrained(snake_case ) pipeline(task="fill-mask" , model=snake_case ) # baseline - just load from_pretrained with normal network lowerCAmelCase__ : Any = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed lowerCAmelCase__ : Optional[Any] = self.get_env() lowerCAmelCase__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n " lowerCAmelCase__ : List[str] = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n " lowerCAmelCase__ : Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = 
offline_socket\n " # baseline - just load from_pretrained with normal network lowerCAmelCase__ : Dict = [sys.executable, "-c", "\n".join([load, run] )] # should succeed lowerCAmelCase__ : List[str] = self.get_env() lowerCAmelCase__ : Optional[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # next emulate no network lowerCAmelCase__ : Optional[int] = [sys.executable, "-c", "\n".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ : int = "1" lowerCAmelCase__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ : Any = "\nfrom transformers import pipeline\n " lowerCAmelCase__ : Tuple = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n " lowerCAmelCase__ : Union[str, Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " lowerCAmelCase__ : str = self.get_env() lowerCAmelCase__ : Tuple = "1" lowerCAmelCase__ : Dict = [sys.executable, "-c", "\n".join([load, mock, run] )] lowerCAmelCase__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , ) @require_torch def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ : Dict = "\nfrom transformers import AutoModel\n " lowerCAmelCase__ : Tuple = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n " # baseline - just load from_pretrained with normal network lowerCAmelCase__ : str = [sys.executable, "-c", "\n".join([load, run] )] # should succeed lowerCAmelCase__ : str = self.get_env() lowerCAmelCase__ : Dict = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ : str = "1" lowerCAmelCase__ : Optional[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() )
453
"""simple docstring""" import os import unittest from transformers import LxmertTokenizer, LxmertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __a ( __magic_name__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = LxmertTokenizer __UpperCamelCase : Optional[Any] = LxmertTokenizerFast __UpperCamelCase : str = True __UpperCamelCase : Any = True def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" super().setUp() lowerCAmelCase__ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE_ ( self , snake_case ): """simple docstring""" lowerCAmelCase__ : List[Any] = "UNwant\u00E9d,running" lowerCAmelCase__ : Union[str, Any] = "unwanted, running" return input_text, output_text def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ : str = self.tokenizer_class(self.vocab_file ) lowerCAmelCase__ : List[Any] = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(snake_case , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [7, 4, 5, 10, 8, 9] ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase__ : int = self.get_tokenizer() lowerCAmelCase__ : List[str] = self.get_rust_tokenizer() lowerCAmelCase__ : Optional[Any] = "I was born in 92000, and this is falsé." lowerCAmelCase__ : int = tokenizer.tokenize(snake_case ) lowerCAmelCase__ : int = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) lowerCAmelCase__ : str = tokenizer.encode(snake_case , add_special_tokens=snake_case ) lowerCAmelCase__ : List[str] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) lowerCAmelCase__ : Tuple = self.get_rust_tokenizer() lowerCAmelCase__ : Tuple = tokenizer.encode(snake_case ) lowerCAmelCase__ : List[Any] = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case )
453
1
from random import shuffle

import tensorflow as tf
from numpy import array


def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.
    'vectors' should be an n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k.
    'noofclusters' should be an integer.
    """

    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)}) for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
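A minimal, hedged usage sketch for the clustering routine above. The function name follows the reconstruction, and TF 1.x session APIs (tf.Session, tf.placeholder) are assumed to be available:

import numpy as np

# Toy data: two well-separated blobs in 2-D
rng = np.random.default_rng(0)
vectors = np.vstack([rng.normal(0.0, 0.1, (10, 2)), rng.normal(5.0, 0.1, (10, 2))])

centroids, assignments = TFKMeansCluster(vectors, 2)
print(centroids)    # two centroid coordinates, near (0, 0) and (5, 5)
print(assignments)  # a cluster index (0 or 1) per input vector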
701
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
609
0
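A minimal inference sketch distilled from the SegFormer integration tests above (the checkpoint name, fixture image and post-processing call are all taken from the tests; treat it as an illustration, not part of the test file):

import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

checkpoint = "nvidia/segformer-b0-finetuned-ade-512-512"
processor = SegformerImageProcessor.from_pretrained(checkpoint)
model = SegformerForSemanticSegmentation.from_pretrained(checkpoint)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # logits have shape (batch, num_labels, H/4, W/4)

# upsample the logits back to the input resolution and take the per-pixel argmax
segmentation = processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[image.size[::-1]])
print(segmentation[0].shape)  # per-pixel class ids at the original resolution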
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # fixed: the package is bs4, not "bsa"
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
351
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
351
1
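A short usage sketch for the configuration above; GLPNForDepthEstimation is assumed to be exported next to the config, following the usual transformers pattern:

from transformers import GLPNConfig, GLPNForDepthEstimation

# defaults mirror the signature above; only a couple of fields are overridden here
config = GLPNConfig(decoder_hidden_size=64, max_depth=10)
model = GLPNForDepthEstimation(config)  # randomly initialized, not pretrained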
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.strip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
705
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient SCREAMING_SNAKE_CASE = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN']) def _lowerCamelCase ( __A : Optional[Any] ) -> int: _UpperCAmelCase : Optional[int] = test_results.split(''' ''' ) _UpperCAmelCase : Optional[int] = 0 _UpperCAmelCase : Union[str, Any] = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. _UpperCAmelCase : Tuple = expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(__A ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def _lowerCamelCase ( __A : List[str] ) -> int: _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : int = None _UpperCAmelCase : Any = False for line in failures_short_lines.split('''\n''' ): if re.search(r'''_ \[doctest\]''' , __A ): _UpperCAmelCase : Tuple = True _UpperCAmelCase : Optional[Any] = line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): _UpperCAmelCase : int = line _UpperCAmelCase : Optional[Any] = False return failures class A_ : '''simple docstring''' def __init__( self , _A , _A) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Tuple = title _UpperCAmelCase : List[str] = doc_test_results['''time_spent'''].split(''',''')[0] _UpperCAmelCase : List[Any] = doc_test_results['''success'''] _UpperCAmelCase : Optional[Any] = doc_test_results['''failures'''] _UpperCAmelCase : Tuple = self.n_success + self.n_failures # Failures and success of the modeling tests _UpperCAmelCase : Optional[int] = doc_test_results @property def snake_case__ ( self) -> str: """simple docstring""" _UpperCAmelCase : List[str] = [self._time_spent] _UpperCAmelCase : Dict = 0 for time in time_spent: _UpperCAmelCase : Optional[int] = time.split(''':''') # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(_A) == 1: _UpperCAmelCase : List[Any] = [0, 0, time_parts[0]] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return f'''{int(_A)}h{int(_A)}m{int(_A)}s''' @property def snake_case__ ( self) -> Dict: """simple docstring""" return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def snake_case__ ( self) -> Dict: """simple docstring""" return { "type": "section", "text": { "type": "plain_text", "text": f'''🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.''', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } @property def snake_case__ ( self) -> Dict: """simple docstring""" return { "type": "section", "text": { "type": "plain_text", "text": ( f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in''' f''' {self.time}.''' ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } @property def snake_case__ ( self) -> Dict: """simple docstring""" _UpperCAmelCase : List[str] = 40 _UpperCAmelCase : List[str] = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A)} _UpperCAmelCase : List[Any] = '''''' for category, failures in category_failures.items(): if len(_A) == 0: continue if report != "": report += "\n\n" report += f'''*{category} failures*:'''.ljust(line_length // 2).rjust(line_length // 2) + "\n" report += "`" report += "`\n`".join(_A) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f'''The following examples had failures:\n\n\n{report}\n''', }, } @property def snake_case__ ( self) -> str: """simple docstring""" _UpperCAmelCase : int = [self.header] if self.n_failures > 0: blocks.append(self.failures) if self.n_failures > 0: blocks.extend([self.category_failures]) if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(_A) @staticmethod def snake_case__ ( ) -> List[str]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = [ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } ] print('''Sending the following payload''') print(json.dumps({'''blocks''': json.loads(_A)})) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , ) def snake_case__ ( self) -> str: """simple docstring""" print('''Sending the following payload''') print(json.dumps({'''blocks''': json.loads(self.payload)})) _UpperCAmelCase : str = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.''' _UpperCAmelCase : Optional[Any] = client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , ) def snake_case__ ( self , _A , _A , _A , _A) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[int] = '''''' for key, value in failures.items(): _UpperCAmelCase : str = value[:200] + ''' [Truncated]''' if len(_A) > 250 else value failures_text += f'''*{key}*\n_{value}_\n\n''' _UpperCAmelCase : Optional[Any] = job_name _UpperCAmelCase : Any = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: _UpperCAmelCase : List[Any] = { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True}, '''url''': job_link, } return [ {"type": 
"header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def snake_case__ ( self) -> Union[str, Any]: """simple docstring""" if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''') _UpperCAmelCase : Tuple = self.doc_test_results.pop('''job_link''') self.doc_test_results.pop('''failures''') self.doc_test_results.pop('''success''') self.doc_test_results.pop('''time_spent''') _UpperCAmelCase : Any = sorted(self.doc_test_results.items() , key=lambda _A: t[0]) for job, job_result in sorted_dict: if len(job_result['''failures''']): _UpperCAmelCase : Dict = f'''*Num failures* :{len(job_result['failed'])} \n''' _UpperCAmelCase : Tuple = job_result['''failures'''] _UpperCAmelCase : Dict = self.get_reply_blocks(_A , _A , _A , text=_A) print('''Sending the following reply''') print(json.dumps({'''blocks''': blocks})) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f'''Results for {job}''' , blocks=_A , thread_ts=self.thread_ts['''ts'''] , ) time.sleep(1) def _lowerCamelCase ( ) -> str: _UpperCAmelCase : Any = os.environ['''GITHUB_RUN_ID'''] _UpperCAmelCase : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100''' _UpperCAmelCase : Optional[int] = requests.get(__A ).json() _UpperCAmelCase : Optional[int] = {} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) _UpperCAmelCase : int = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(__A ): _UpperCAmelCase : int = requests.get(url + f'''&page={i + 2}''' ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''' , __A ) return {} def _lowerCamelCase ( __A : str ) -> Tuple: _UpperCAmelCase : Optional[Any] = {} if os.path.exists(__A ): _UpperCAmelCase : Optional[int] = os.listdir(__A ) for file in files: try: with open(os.path.join(__A , __A ) , encoding='''utf-8''' ) as f: _UpperCAmelCase : Tuple = f.read() except UnicodeDecodeError as e: raise ValueError(f'''Could not open {os.path.join(__A , __A )}.''' ) from e return _artifact def _lowerCamelCase ( ) -> List[str]: class A_ : '''simple docstring''' def __init__( self , _A) -> int: """simple docstring""" _UpperCAmelCase : List[Any] = name _UpperCAmelCase : Union[str, Any] = [] def __str__( self) -> int: """simple docstring""" return self.name def snake_case__ ( self , _A) -> int: """simple docstring""" self.paths.append({'''name''': self.name, '''path''': path}) _UpperCAmelCase : Dict[str, Artifact] = {} _UpperCAmelCase : Dict = filter(os.path.isdir , os.listdir() ) for directory in directories: _UpperCAmelCase : List[str] = directory if artifact_name not in _available_artifacts: _UpperCAmelCase : Optional[int] = Artifact(__A ) _available_artifacts[artifact_name].add_path(__A ) return _available_artifacts if __name__ == "__main__": SCREAMING_SNAKE_CASE = get_job_links() SCREAMING_SNAKE_CASE = retrieve_available_artifacts() SCREAMING_SNAKE_CASE = collections.OrderedDict( [ ('*.py', 'API Examples'), ('*.md', 'MD Examples'), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' SCREAMING_SNAKE_CASE = { v: { 'failed': [], 'failures': {}, } for v in docs.values() } # Link to the GitHub Action 
job SCREAMING_SNAKE_CASE = github_actions_job_links.get('run_doctests') SCREAMING_SNAKE_CASE = available_artifacts['doc_tests_gpu_test_reports'].paths[0] SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['name']) if "stats" in artifact: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = handle_test_results(artifact['stats']) SCREAMING_SNAKE_CASE = failed SCREAMING_SNAKE_CASE = success SCREAMING_SNAKE_CASE = time_spent[1:-1] + ', ' SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['failures_short']) for line in artifact["summary_short"].split('\n'): if re.search('FAILED', line): SCREAMING_SNAKE_CASE = line.replace('FAILED ', '') SCREAMING_SNAKE_CASE = line.split()[0].replace('\n', '') if "::" in line: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = line.split('::') else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): SCREAMING_SNAKE_CASE = docs[file_regex] doc_test_results[category]["failed"].append(test) SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else 'N/A' SCREAMING_SNAKE_CASE = failure break SCREAMING_SNAKE_CASE = Message('🤗 Results of the doc tests.', doc_test_results) message.post() message.post_reply()
186
0
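The guidance branch of the pipeline above reduces to one line of tensor arithmetic; a standalone restatement (hypothetical helper name) for clarity:

import torch

def classifier_free_guidance(cond_eps: torch.Tensor, uncond_eps: torch.Tensor, scale: float) -> torch.Tensor:
    # Move the unconditional prediction toward the conditional one by `scale`;
    # scale == 1 recovers the conditional prediction unchanged.
    return uncond_eps + scale * (cond_eps - uncond_eps)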
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
429
"""simple docstring""" from bisect import bisect from itertools import accumulate def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = sorted(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , key=lambda lowerCAmelCase__ : x[0] / x[1] , reverse=lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = [i[0] for i in r], [i[1] for i in r] UpperCAmelCase_ = list(accumulate(lowerCAmelCase__ ) ) UpperCAmelCase_ = bisect(lowerCAmelCase__ , lowerCAmelCase__ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
82
0
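With the reconstructed signature frac_knapsack(vl, wt, w, n) above, the classic textbook instance works out to 240.0: the value/weight ratios are 6 > 5 > 4, so the first two items are taken whole (value 160), then 20 of the 30-weight item adds 20 * 120 / 30 = 80:

print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0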
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse the cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
716
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
537
0
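A usage sketch for the keyword cipher above; the round trip is only guaranteed for alphabetic input, since punctuation passes through unmapped:

cipher_map = create_cipher_map("Goodbye!!")
print(encipher("Hello World!!", cipher_map))  # CYJJM VMQJB!!
print(decipher("CYJJM VMQJB!!", cipher_map))  # HELLO WORLD!!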
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
556
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
45
0
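A quick sanity check of the consecutive-prime-sum search above on a small ceiling; below 100 the answer is 41 = 2 + 3 + 5 + 7 + 11 + 13, the longest such run:

print(solution(100))  # 41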
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
130
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
130
1
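A worked check of the formula s = 2πr · (θ/360) implemented in the arc-length snippet above:

from math import isclose, pi

# arc_length(90, 10) sweeps a quarter of a circle of radius 10,
# so s = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708
assert isclose(arc_length(90, 10), 5 * pi)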
def is_power_of_two(number: int) -> bool:
    """
    >>> is_power_of_two(16)
    True
    >>> is_power_of_two(18)
    False
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
50
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
50
1
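The bit trick behind the power-of-two check above: n & (n - 1) clears the lowest set bit, so the result is zero exactly when at most one bit is set. A few checks, including the 0 edge case that the negative-number guard does not reject:

for n in (1, 2, 64, 2**30):
    assert is_power_of_two(n)
for n in (3, 6, 100):
    assert not is_power_of_two(n)
# 0 & -1 == 0 also holds, so 0 reports True even though it is not a power of two
assert is_power_of_two(0)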
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCamelCase ( A , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = GPTSanJapaneseTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = {"""do_clean_text""": False, """add_prefix_space""": False} def __lowerCamelCase ( self : Dict): '''simple docstring''' super().setUp() # fmt: off __lowercase =['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on __lowercase ={'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 __lowercase ={'unk_token': '<unk>'} __lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) with open(self.emoji_file , 'w') as emoji_writer: emoji_writer.write(json.dumps(_lowerCAmelCase)) def __lowerCamelCase ( self : Any , **_lowerCAmelCase : Tuple): '''simple docstring''' kwargs.update(self.special_tokens_map) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase) def __lowerCamelCase ( self : Dict , _lowerCAmelCase : Dict): '''simple docstring''' __lowercase ='こんにちは、世界。 \nこんばんは、㔺界。😀' __lowercase ='こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Optional[int]): '''simple docstring''' __lowercase , __lowercase =self.get_input_output_texts(_lowerCAmelCase) __lowercase =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase) __lowercase =tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase) return text, ids def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' pass # TODO add if relevant def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' pass # TODO add if relevant def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' pass # TODO add if relevant def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __lowercase =self.get_tokenizer() # Testing tokenization __lowercase ='こんにちは、世界。 こんばんは、㔺界。' __lowercase =['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] __lowercase =tokenizer.tokenize(_lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) # Testing conversion to ids without special tokens __lowercase =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] __lowercase =tokenizer.convert_tokens_to_ids(_lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) # Testing conversion to ids with special tokens __lowercase =tokens + [tokenizer.unk_token] __lowercase =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9] __lowercase =tokenizer.convert_tokens_to_ids(_lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) def __lowerCamelCase ( self : int): '''simple docstring''' __lowercase =self.get_tokenizer() # Testing tokenization __lowercase ='こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' __lowercase ='こんにちは、、、、世界。こんばんは、、、、世界。' __lowercase =tokenizer.encode(_lowerCAmelCase) 
__lowercase =tokenizer.decode(_lowerCAmelCase) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase) @slow def __lowerCamelCase ( self : List[str]): '''simple docstring''' __lowercase =self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') # Testing tokenization __lowercase ='こんにちは、世界。' __lowercase ='こんばんは、㔺界。😀' __lowercase ='こんにちは、世界。こんばんは、世界。😀' __lowercase =tokenizer.encode(prefix_text + input_text) __lowercase =tokenizer.encode('' , prefix_text=prefix_text + input_text) __lowercase =tokenizer.encode(_lowerCAmelCase , prefix_text=_lowerCAmelCase) __lowercase =tokenizer.decode(_lowerCAmelCase) __lowercase =tokenizer.decode(_lowerCAmelCase) __lowercase =tokenizer.decode(_lowerCAmelCase) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase) @slow def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase =self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') # Testing tokenization __lowercase ='こんにちは、世界。' __lowercase ='こんばんは、㔺界。😀' __lowercase =len(tokenizer.encode(_lowerCAmelCase)) - 2 __lowercase =len(tokenizer.encode(_lowerCAmelCase)) - 2 __lowercase =[1] + [0] * (len_prefix + len_text + 1) __lowercase =[1] * (len_prefix + len_text + 1) + [0] __lowercase =[1] + [1] * (len_prefix) + [0] * (len_text + 1) __lowercase =tokenizer(prefix_text + input_text).token_type_ids __lowercase =tokenizer('' , prefix_text=prefix_text + input_text).token_type_ids __lowercase =tokenizer(_lowerCAmelCase , prefix_text=_lowerCAmelCase).token_type_ids self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase) @slow def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' __lowercase =self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') __lowercase =tokenizer.encode('あンいワ') __lowercase =tokenizer.encode('' , prefix_text='あンいワ') __lowercase =tokenizer.encode('いワ' , prefix_text='あン') self.assertEqual(tokenizer.decode(_lowerCAmelCase) , tokenizer.decode(_lowerCAmelCase)) self.assertEqual(tokenizer.decode(_lowerCAmelCase) , tokenizer.decode(_lowerCAmelCase)) self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase) self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase) self.assertEqual(x_token_a[1] , x_token_a[-1]) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3]) # SEG token @slow def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __lowercase =self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') __lowercase =[['武田信玄', 'は、'], ['織田信長', 'の配下の、']] __lowercase =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase) __lowercase =tokenizer.batch_encode_plus(_lowerCAmelCase , padding=_lowerCAmelCase) # fmt: off __lowercase =[[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]] __lowercase =[[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] __lowercase =[[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , _lowerCAmelCase) self.assertListEqual(x_token.token_type_ids , _lowerCAmelCase) self.assertListEqual(x_token.attention_mask , _lowerCAmelCase) self.assertListEqual(x_token_a.input_ids , _lowerCAmelCase) self.assertListEqual(x_token_a.token_type_ids , _lowerCAmelCase) self.assertListEqual(x_token_a.attention_mask , _lowerCAmelCase) def __lowerCamelCase ( self : 
Optional[Any]): '''simple docstring''' pass def __lowerCamelCase ( self : Any): '''simple docstring''' pass
702
'''simple docstring''' from __future__ import annotations def _A ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ): """simple docstring""" __lowercase =cipher_alphabet or [chr(_lowerCAmelCase ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) __lowercase ={ 'a': 0.0_84_97, 'b': 0.0_14_92, 'c': 0.0_22_02, 'd': 0.0_42_53, 'e': 0.1_11_62, 'f': 0.0_22_28, 'g': 0.0_20_15, 'h': 0.0_60_94, 'i': 0.0_75_46, 'j': 0.0_01_53, 'k': 0.0_12_92, 'l': 0.0_40_25, 'm': 0.0_24_06, 'n': 0.0_67_49, 'o': 0.0_75_07, 'p': 0.0_19_29, 'q': 0.0_00_95, 'r': 0.0_75_87, 's': 0.0_63_27, 't': 0.0_93_56, 'u': 0.0_27_58, 'v': 0.0_09_78, 'w': 0.0_25_60, 'x': 0.0_01_50, 'y': 0.0_19_94, 'z': 0.0_00_77, } else: # Custom frequencies dictionary __lowercase =frequencies_dict if not case_sensitive: __lowercase =ciphertext.lower() # Chi squared statistic values __lowercase ={} # cycle through all of the shifts for shift in range(len(_lowerCAmelCase ) ): __lowercase ='' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet __lowercase =(alphabet_letters.index(letter.lower() ) - shift) % len( _lowerCAmelCase ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter __lowercase =0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: __lowercase =letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message __lowercase =decrypted_with_shift.lower().count(_lowerCAmelCase ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __lowercase =frequencies[letter] * occurrences # Complete the chi squared statistic formula __lowercase =((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message __lowercase =decrypted_with_shift.count(_lowerCAmelCase ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __lowercase =frequencies[letter] * occurrences # Complete the chi squared statistic formula __lowercase =((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary __lowercase =( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_lowerCAmelCase ) -> tuple[float, str]: return chi_squared_statistic_values[key] __lowercase =min( _lowerCAmelCase , key=_lowerCAmelCase , ) # Get all the data from the most likely cipher (key, decoded message) ( ( __lowercase ) , ( __lowercase ) , ) =chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
454
0
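The case-insensitive scoring loop in the chi-squared decryption snippet above condenses to the following restatement (hypothetical helper name; it keeps the snippet's own convention of expected = frequencies[letter] * occurrences):

def chi_squared_score(decrypted: str, frequencies: dict[str, float]) -> float:
    # Sum (observed - expected)^2 / expected over the letters of one candidate decryption
    score = 0.0
    for letter in decrypted.lower():
        if letter in frequencies:
            occurrences = decrypted.lower().count(letter)
            expected = frequencies[letter] * occurrences
            score += (occurrences - expected) ** 2 / expected
    return score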
import datasets

from .evaluate import evaluate


_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the
corresponding reading passage, or the question might be unanswerable.
"""

_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = datasets.load_metric("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
349
'''simple docstring''' import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _a ( lowerCamelCase_ , lowerCamelCase_ ): snake_case : str =XCLIPTextConfig() # derive patch size from model name snake_case : int =model_name.find('''patch''' ) snake_case : Optional[Any] =int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) snake_case : Tuple =XCLIPVisionConfig(patch_size=lowerCamelCase_ , num_frames=lowerCamelCase_ ) if "large" in model_name: snake_case : List[str] =7_68 snake_case : int =30_72 snake_case : int =12 snake_case : List[str] =10_24 snake_case : Any =40_96 snake_case : Optional[int] =16 snake_case : Optional[int] =24 snake_case : Optional[int] =7_68 snake_case : Tuple =30_72 if model_name == "xclip-large-patch14-16-frames": snake_case : int =3_36 snake_case : Tuple =XCLIPConfig.from_text_vision_configs(lowerCamelCase_ , lowerCamelCase_ ) if "large" in model_name: snake_case : Dict =7_68 return config def _a ( lowerCamelCase_ ): # text encoder if name == "token_embedding.weight": snake_case : Optional[int] =name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": snake_case : List[Any] =name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: snake_case : Optional[Any] =name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: snake_case : Tuple =name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: snake_case : Union[str, Any] =name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: snake_case : List[Any] =name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): snake_case : List[Any] =name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: snake_case : Any =name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: snake_case : List[str] =name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": snake_case : int =name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": snake_case : str =name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): snake_case : Any =name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: snake_case : Tuple =name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: snake_case : Optional[int] =name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: snake_case : List[Any] =name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' ) if "visual.proj" in name: snake_case : str =name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: snake_case : Any =name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: snake_case : Dict =name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: 
snake_case : List[Any] =name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": snake_case : List[str] =name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): snake_case : Optional[Any] =name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): snake_case : Dict =name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def _a ( lowerCamelCase_ , lowerCamelCase_ ): for key in orig_state_dict.copy().keys(): snake_case : Any =orig_state_dict.pop(lowerCamelCase_ ) if "attn.in_proj" in key: snake_case : List[Any] =key.split('''.''' ) if key.startswith('''visual''' ): snake_case : int =key_split[3] snake_case : Dict =config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: snake_case : Union[str, Any] =val[ :dim, : ] snake_case : Tuple =val[ dim : dim * 2, : ] snake_case : Any =val[ -dim:, : ] else: snake_case : Dict =val[ :dim ] snake_case : Tuple =val[ dim : dim * 2 ] snake_case : Optional[int] =val[ -dim: ] else: if "weight" in key: snake_case : Any =val[ :dim, : ] snake_case : List[str] =val[ dim : dim * 2, : ] snake_case : Union[str, Any] =val[ -dim:, : ] else: snake_case : Any =val[:dim] snake_case : Dict =val[ dim : dim * 2 ] snake_case : Optional[Any] =val[-dim:] elif key.startswith('''mit''' ): snake_case : Any =key_split[2] snake_case : Optional[Any] =config.vision_config.mit_hidden_size if "weight" in key: snake_case : Tuple =val[:dim, :] snake_case : Optional[Any] =val[dim : dim * 2, :] snake_case : str =val[-dim:, :] else: snake_case : Dict =val[:dim] snake_case : Optional[int] =val[dim : dim * 2] snake_case : int =val[-dim:] else: snake_case : Tuple =key_split[2] snake_case : List[Any] =config.text_config.hidden_size if "weight" in key: snake_case : Dict =val[:dim, :] snake_case : List[str] =val[ dim : dim * 2, : ] snake_case : Optional[Any] =val[-dim:, :] else: snake_case : Optional[Any] =val[:dim] snake_case : str =val[ dim : dim * 2 ] snake_case : List[str] =val[-dim:] else: snake_case : Optional[int] =rename_key(lowerCamelCase_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: snake_case : List[str] =val.T snake_case : Optional[int] =val return orig_state_dict def _a ( lowerCamelCase_ ): if num_frames == 8: snake_case : Optional[int] ='''eating_spaghetti_8_frames.npy''' elif num_frames == 16: snake_case : Optional[Any] ='''eating_spaghetti.npy''' elif num_frames == 32: snake_case : Dict ='''eating_spaghetti_32_frames.npy''' snake_case : List[str] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=lowerCamelCase_ , repo_type='''dataset''' , ) snake_case : int =np.load(lowerCamelCase_ ) return list(lowerCamelCase_ ) def _a ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False ): snake_case : int ={ # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': 
'''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } snake_case : Union[str, Any] =model_to_url[model_name] snake_case : Tuple =8 if "16-frames" in model_name: snake_case : Optional[int] =16 elif "shot" in model_name: snake_case : int =32 snake_case : Dict =get_xclip_config(lowerCamelCase_ , lowerCamelCase_ ) snake_case : Union[str, Any] =XCLIPModel(lowerCamelCase_ ) model.eval() if "drive" in checkpoint_url: snake_case : Tuple ='''pytorch_model.bin''' gdown.cached_download(lowerCamelCase_ , lowerCamelCase_ , quiet=lowerCamelCase_ ) snake_case : Dict =torch.load(lowerCamelCase_ , map_location='''cpu''' )['''model'''] else: snake_case : Optional[Any] =torch.hub.load_state_dict_from_url(lowerCamelCase_ )['''model'''] snake_case : Optional[Any] =convert_state_dict(lowerCamelCase_ , lowerCamelCase_ ) snake_case : List[str] =XCLIPModel(lowerCamelCase_ ) snake_case , snake_case : List[Any] =model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() snake_case : Tuple =3_36 if model_name == '''xclip-large-patch14-16-frames''' else 2_24 snake_case : int =VideoMAEImageProcessor(size=lowerCamelCase_ ) snake_case : Tuple =CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) snake_case : Union[str, Any] =CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) snake_case : Dict =XCLIPProcessor(image_processor=lowerCamelCase_ , tokenizer=lowerCamelCase_ ) snake_case : Optional[Any] =prepare_video(lowerCamelCase_ )
snake_case : List[str] =processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=lowerCamelCase_ , return_tensors='''pt''' , padding=lowerCamelCase_ ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): snake_case : Dict =model(**lowerCamelCase_ ) # Verify outputs snake_case : Dict =outputs.logits_per_video snake_case : Tuple =logits_per_video.softmax(dim=1 ) print('''Probs:''' , lowerCamelCase_ ) # kinetics-400 if model_name == "xclip-base-patch32": snake_case : Any =torch.tensor([[0.00_19, 0.99_51, 0.00_30]] ) elif model_name == "xclip-base-patch32-16-frames": snake_case : Union[str, Any] =torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] ) elif model_name == "xclip-base-patch16": snake_case : Optional[Any] =torch.tensor([[0.00_83, 0.96_81, 0.02_36]] ) elif model_name == "xclip-base-patch16-16-frames": snake_case : Optional[int] =torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] ) elif model_name == "xclip-large-patch14": snake_case : Tuple =torch.tensor([[0.00_62, 0.98_64, 0.00_75]] ) elif model_name == "xclip-large-patch14-16-frames": snake_case : Tuple =torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": snake_case : Optional[int] =torch.tensor([[0.05_55, 0.89_14, 0.05_31]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": snake_case : Union[str, Any] =torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": snake_case : List[str] =torch.tensor([[0.00_36, 0.99_20, 0.00_45]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": snake_case : List[Any] =torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": snake_case : int =torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": snake_case : List[str] =torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": snake_case : Union[str, Any] =torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": snake_case : List[str] =torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": snake_case : Optional[Any] =torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": snake_case : str =torch.tensor([[0.00_27, 0.99_04, 0.00_70]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": snake_case : Optional[int] =torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": snake_case : List[Any] =torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(lowerCamelCase_ , organization='''nielsr''' ) processor.push_to_hub(lowerCamelCase_ , organization='''nielsr''' ) slow_tokenizer.push_to_hub(lowerCamelCase_ , organization='''nielsr''' ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() # Required parameters 
parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) A : Dict = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
349
1
def __lowercase ( number : int ) -> bool:
    """
    Return True if ``number`` is even, False otherwise.

    >>> __lowercase(2)
    True
    >>> __lowercase(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
717
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _lowercase ( lowerCAmelCase ): _a : Optional[Any] = ['''vqvae'''] def __init__( self : Optional[Any] , a : AutoencoderKL , a : UNetaDConditionModel , a : Mel , a : Union[DDIMScheduler, DDPMScheduler] , ): """simple docstring""" super().__init__() self.register_modules(unet=a , scheduler=a , mel=a , vqvae=a ) def _UpperCamelCase ( self : Any ): """simple docstring""" return 5_0 if isinstance(self.scheduler , a ) else 1_0_0_0 @torch.no_grad() def __call__( self : Optional[int] , a : int = 1 , a : str = None , a : np.ndarray = None , a : int = 0 , a : int = 0 , a : int = None , a : torch.Generator = None , a : float = 0 , a : float = 0 , a : torch.Generator = None , a : float = 0 , a : torch.Tensor = None , a : torch.Tensor = None , a : Optional[int]=True , ): """simple docstring""" __snake_case : List[Any] =steps or self.get_default_steps() self.scheduler.set_timesteps(a ) __snake_case : int =step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __snake_case : Any =(self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __snake_case : List[str] =randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=a , device=self.device , ) __snake_case : int =noise __snake_case : List[str] =None if audio_file is not None or raw_audio is not None: self.mel.load_audio(a , a ) __snake_case : List[Any] =self.mel.audio_slice_to_image(a ) __snake_case : Tuple =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) __snake_case : str =(input_image / 2_5_5) * 2 - 1 __snake_case : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: __snake_case : Tuple =self.vqvae.encode(torch.unsqueeze(a , 0 ) ).latent_dist.sample( generator=a )[0] __snake_case : Optional[int] =self.vqvae.config.scaling_factor * input_images if start_step > 0: __snake_case : str =self.scheduler.add_noise(a , a , self.scheduler.timesteps[start_step - 1] ) __snake_case : Optional[Any] =( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __snake_case : int =int(mask_start_secs * pixels_per_second ) __snake_case : Any =int(mask_end_secs * pixels_per_second ) __snake_case : Union[str, Any] =self.scheduler.add_noise(a , a , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , a ): __snake_case : List[str] =self.unet(a , a , a )['''sample'''] else: __snake_case : Union[str, Any] =self.unet(a , a )['''sample'''] if isinstance(self.scheduler , a ): __snake_case : List[str] =self.scheduler.step( model_output=a , timestep=a , sample=a , eta=a , generator=a , )['''prev_sample'''] else: __snake_case : List[Any] =self.scheduler.step( model_output=a , timestep=a , sample=a , generator=a , )['''prev_sample'''] if mask is not None: if mask_start > 0: __snake_case : Any =mask[:, step, :, :mask_start] if mask_end > 
0: __snake_case : Optional[Any] =mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __snake_case : str =1 / self.vqvae.config.scaling_factor * images __snake_case : Optional[int] =self.vqvae.decode(a )['''sample'''] __snake_case : int =(images / 2 + 0.5).clamp(0 , 1 ) __snake_case : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy() __snake_case : Optional[int] =(images * 2_5_5).round().astype('''uint8''' ) __snake_case : Tuple =list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(a , mode='''RGB''' ).convert('''L''' ) for _ in images) ) __snake_case : Union[str, Any] =[self.mel.image_to_audio(a ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(a )[:, np.newaxis, :] ) , **ImagePipelineOutput(a ) ) @torch.no_grad() def _UpperCamelCase ( self : int , a : List[Image.Image] , a : int = 5_0 ): """simple docstring""" assert isinstance(self.scheduler , a ) self.scheduler.set_timesteps(a ) __snake_case : Dict =np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) __snake_case : Any =(sample / 2_5_5) * 2 - 1 __snake_case : Any =torch.Tensor(a ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): __snake_case : Union[str, Any] =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __snake_case : Dict =self.scheduler.alphas_cumprod[t] __snake_case : str =( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __snake_case : int =1 - alpha_prod_t __snake_case : Tuple =self.unet(a , a )['''sample'''] __snake_case : str =(1 - alpha_prod_t_prev) ** 0.5 * model_output __snake_case : Dict =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __snake_case : Optional[int] =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _UpperCamelCase ( a : torch.Tensor , a : torch.Tensor , a : float ): """simple docstring""" __snake_case : List[Any] =acos(torch.dot(torch.flatten(a ) , torch.flatten(a ) ) / torch.norm(a ) / torch.norm(a ) ) return sin((1 - alpha) * theta ) * xa / sin(a ) + sin(alpha * theta ) * xa / sin(a )
497
0
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=3_0 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=2 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=0.6 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : List[str] = batch_size lowerCamelCase : Any = image_size lowerCamelCase : List[str] = patch_size lowerCamelCase : Optional[int] = num_channels lowerCamelCase : int = is_training lowerCamelCase : Tuple = use_labels lowerCamelCase : List[Any] = hidden_size lowerCamelCase : Dict = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : Any = intermediate_size lowerCamelCase : List[Any] = hidden_act lowerCamelCase : Tuple = hidden_dropout_prob lowerCamelCase : str = attention_probs_dropout_prob lowerCamelCase : int = type_sequence_label_size lowerCamelCase : Dict = initializer_range lowerCamelCase : Optional[Any] = mask_ratio lowerCamelCase : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase : int = (image_size // patch_size) ** 2 lowerCamelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : List[str] = None if self.use_labels: lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Dict = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Union[str, Any] = TFViTMAEModel(config=__lowerCAmelCase ) lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , 
training=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = TFViTMAEForPreTraining(__lowerCAmelCase ) lowerCamelCase : List[str] = model(__lowerCAmelCase , training=__lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase : List[str] = (self.image_size // self.patch_size) ** 2 lowerCamelCase : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase : Optional[Any] = 1 lowerCamelCase : Union[str, Any] = TFViTMAEForPreTraining(__lowerCAmelCase ) lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , training=__lowerCAmelCase ) lowerCamelCase : List[str] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.prepare_config_and_inputs() (lowerCamelCase) : Tuple = config_and_inputs lowerCamelCase : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCAmelCase : Optional[Any] = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} _UpperCAmelCase : Dict = False _UpperCAmelCase : Union[str, Any] = False _UpperCAmelCase : Union[str, Any] = False _UpperCAmelCase : Tuple = False def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = TFViTMAEModelTester(self ) lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Dict = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Layer ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) lowerCamelCase : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] lowerCamelCase : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def UpperCamelCase__ ( self ): np.random.seed(2 ) 
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Any = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : int = model_class(__lowerCAmelCase ) lowerCamelCase : str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase : str = model(__lowerCAmelCase , noise=__lowerCAmelCase ) lowerCamelCase : str = copy.deepcopy(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase , noise=__lowerCAmelCase ) lowerCamelCase : Any = outputs_dict[0].numpy() lowerCamelCase : Any = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 ) def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : str = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__magic_name__ ): lowerCamelCase : Dict = {} for k, v in inputs_dict.items(): if tf.is_tensor(__lowerCAmelCase ): lowerCamelCase : Optional[int] = v.numpy() else: lowerCamelCase : Union[str, Any] = np.array(__lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) lowerCamelCase : Optional[Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase : Any = prepare_numpy_arrays(__lowerCAmelCase ) lowerCamelCase : Dict = model(__lowerCAmelCase , noise=__lowerCAmelCase ) lowerCamelCase : Dict = model(**__lowerCAmelCase , noise=__lowerCAmelCase ) self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): np.random.seed(2 ) lowerCamelCase : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase : List[str] = tf.constant(__lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase : List[Any] = tf_noise super().check_pt_tf_models(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(__lowerCAmelCase , __lowerCAmelCase ),) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__lowerCAmelCase , """_keras_serializable""" , __lowerCAmelCase ) } lowerCamelCase : Dict = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase : Dict = tf.convert_to_tensor(__lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase : str = main_layer_class(__lowerCAmelCase ) lowerCamelCase : Optional[Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase : List[str] = tf.keras.Model(__lowerCAmelCase , outputs=main_layer(__lowerCAmelCase ) ) lowerCamelCase : Tuple = model(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , """keras_model.h5""" ) model.save(__lowerCAmelCase ) lowerCamelCase : Dict = tf.keras.models.load_model( __lowerCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__lowerCAmelCase , tf.keras.Model ) lowerCamelCase : int = model(__lowerCAmelCase ) self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase ) @slow def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Dict = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : Tuple = model_class(__lowerCAmelCase ) lowerCamelCase : str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase : List[Any] = model(__lowerCAmelCase , noise=__lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase : Dict = outputs.last_hidden_state.numpy() lowerCamelCase : Any = 0 else: lowerCamelCase : Dict = outputs.logits.numpy() lowerCamelCase : Union[str, Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCAmelCase , saved_model=__lowerCAmelCase ) lowerCamelCase : List[str] = model_class.from_pretrained(__lowerCAmelCase ) lowerCamelCase : int = model(__lowerCAmelCase , noise=__lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase : Optional[int] = after_outputs['''last_hidden_state'''].numpy() lowerCamelCase : str = 0 else: lowerCamelCase : str = after_outputs['''logits'''].numpy() lowerCamelCase : Optional[Any] = 0 lowerCamelCase : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCAmelCase , 1e-5 ) def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) lowerCamelCase : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase : Tuple = model(__lowerCAmelCase , noise=__lowerCAmelCase ) lowerCamelCase : Tuple = model.get_config() # make 
sure that returned config is jsonifiable, which is required by keras json.dumps(__lowerCAmelCase ) lowerCamelCase : List[Any] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase : Union[str, Any] = model_class.from_config(model.config ) lowerCamelCase : Dict = new_model(__lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase : Any = new_model(__lowerCAmelCase , noise=__lowerCAmelCase ) self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase__ ( self ): pass @slow def UpperCamelCase__ ( self ): lowerCamelCase : Any = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(__lowerCAmelCase ) def _a ( ): lowerCamelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase__ ( self ): np.random.seed(2 ) lowerCamelCase : str = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase : Tuple = self.default_image_processor lowerCamelCase : Dict = prepare_img() lowerCamelCase : Dict = image_processor(images=__lowerCAmelCase , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase : Tuple = ViTMAEConfig() lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase : List[str] = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase : str = model(**__lowerCAmelCase , noise=__lowerCAmelCase ) # verify the logits lowerCamelCase : List[Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) lowerCamelCase : int = tf.convert_to_tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
681
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1""" SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart""" @require_torch class lowerCamelCase_ ( lowerCamelCase ): def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ): """simple docstring""" __magic_name__ :List[Any] = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) __magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history if not do_eval: return __magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :str = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ :Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def A ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase ) @require_apex @require_torch_gpu def A ( self ): """simple docstring""" # 
XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def A ( self , __lowerCAmelCase ): """simple docstring""" # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout __magic_name__ :Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } __magic_name__ :Optional[Any] = experiments[experiment_id] __magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} __magic_name__ :Optional[int] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] ) __magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data['''n_matches'''] ) @slow def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , ) # Check metrics __magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()] __magic_name__ :Any = eval_metrics[0] __magic_name__ :int = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase ) # test if do_predict saves generations and metrics __magic_name__ :List[Any] = os.listdir(__lowerCAmelCase ) __magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def A ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]: __magic_name__ 
:str = '''--skip_memory_metrics 0''' __magic_name__ :Dict = self.run_trainer( max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics __magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history __magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 ) __magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 ) __magic_name__ :Any = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss __magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) __magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) __magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb __magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig __magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb __magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings __magic_name__ :Optional[Any] = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( __lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ): """simple docstring""" 
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' __magic_name__ :Dict = self.get_auto_remove_tmp_dir() __magic_name__ :Tuple = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(__lowerCAmelCase )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(__lowerCAmelCase )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() __magic_name__ :str = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(__lowerCAmelCase )} '''.split() __magic_name__ :Dict = ''' --do_predict '''.split() __magic_name__ :Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: __magic_name__ :List[Any] = get_gpu_count() __magic_name__ :Tuple = get_torch_dist_unique_port() __magic_name__ :Union[str, Any] = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() __magic_name__ :Any = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: __magic_name__ :List[Any] = ['''run_translation.py'''] + args with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ): main() return output_dir
0
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
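# Minimal usage sketch (added for illustration, not part of the original file).
# The module is imported through the transformers package (note the relative
# imports), so the example is shown as comments only:
#     config = OpenAIGPTConfig()  # defaults mirror the released openai-gpt checkpoint
#     config.n_embd      -> 768
#     config.n_positions -> 512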
710
def __lowerCAmelCase (column_title: str) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
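# A small usage sketch (added for illustration, not part of the original file):
# the loop above is a base-26 conversion, so "AB" maps to
# (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0 = 26 + 2 = 28.
if __name__ == "__main__":
    for title, expected in (("A", 1), ("Z", 26), ("AA", 27), ("AB", 28)):
        assert __lowerCAmelCase(title) == expected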
531
0
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)
        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
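# Minimal usage sketch (added for illustration, not part of the original file):
# a first-order averaging filter y[n] = 0.5*x[n] + 0.5*x[n-1], i.e.
# b = [0.5, 0.5] and a = [1.0, 0.0]. A constant input of 1.0 settles to a
# constant output of 1.0 after one sample.
if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
    assert [filt.process(1.0) for _ in range(4)] == [0.5, 1.0, 1.0, 1.0]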
59
from __future__ import annotations

from math import pi


def ind_reactance(
    inductance: float, frequency: float, reactance: float
) -> dict[str, float]:
    """simple docstring"""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
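# Usage sketch (added for illustration, not part of the original file): passing
# 0 for the quantity to solve for returns it, e.g. a 35 mH inductor at 1 kHz
# has reactance 2 * pi * 1000 * 0.035 ≈ 219.9 ohms.
if __name__ == "__main__":
    print(ind_reactance(35e-3, 1e3, 0))  # {'reactance': 219.91...}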
59
1
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other equal value, because if there is it means
        # that there is a collision in the vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # variables respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('''''')

    print(len(boards), '''solutions were found.''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
208
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
208
1
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
89
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
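# Worked example (added for illustration, not part of the original file):
# 39 -> 3*9=27 -> 2*7=14 -> 1*4=4 takes three steps, while 39 -> 3+9=12 -> 1+2=3
# takes two, so:
if __name__ == "__main__":
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(39) == 2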
89
1
"""simple docstring""" from math import sqrt def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = 0 for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ): if n % i == 0 and i != sqrt(UpperCamelCase_ ): total += i + n // i elif i == sqrt(UpperCamelCase_ ): total += i return total - n def _lowerCAmelCase ( UpperCamelCase_ = 1_0000 ): __SCREAMING_SNAKE_CASE = sum( i for i in range(1 , UpperCamelCase_ ) if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
248
"""simple docstring""" def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): if b == 0: return 1 if (b % 2) == 0: return actual_power(UpperCamelCase_ , int(b / 2 ) ) * actual_power(UpperCamelCase_ , int(b / 2 ) ) else: return a * actual_power(UpperCamelCase_ , int(b / 2 ) ) * actual_power(UpperCamelCase_ , int(b / 2 ) ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): if b < 0: return 1 / actual_power(UpperCamelCase_ , UpperCamelCase_ ) return actual_power(UpperCamelCase_ , UpperCamelCase_ ) if __name__ == "__main__": print(power(-2, -3))
248
1
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f'''{solution() = }''')
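# Worked example (added for illustration, not part of the original file):
# for n = 10 the sum of squares is 10*11*21/6 = 385 and the square of the sum
# is 55**2 = 3025, so the difference is 2640.
if __name__ == "__main__":
    assert solution(10) == 2640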
226
"""simple docstring""" from math import sqrt def UpperCAmelCase ( a__ = 1_00_00_00 ): '''simple docstring''' lowerCAmelCase :int = 0 lowerCAmelCase :int = 0 lowerCAmelCase :int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(a__ , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
553
0
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds')


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"""{key:21} {value}""")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
701
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by
        # inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
from . import __version__

# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
    CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK,
    ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME,
    HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME,
    MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME,
    TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF,
    USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError,
    ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError,
    RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings,
    add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func,
    default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo,
    get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available,
    is_bs4_available, is_coloredlogs_available, is_datasets_available, is_detectron2_available,
    is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available,
    is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available,
    is_phonemizer_available, is_protobuf_available, is_psutil_available, is_py3nvml_available,
    is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available,
    is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available,
    is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble,
    is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available,
    is_tf2onnx_available, is_tf_available, is_timm_available, is_tokenizers_available,
    is_torch_available, is_torch_bf16_available, is_torch_cuda_available, is_torch_fx_available,
    is_torch_fx_proxy, is_torch_mps_available, is_torch_tf32_available, is_torch_tpu_available,
    is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available,
    replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method,
)
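# Note (added for clarity): this module is a backward-compatibility shim, so imports
# written against older releases keep working unchanged, e.g.:
#
#     from transformers.file_utils import ModelOutput  # canonical home is transformers.utils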
"""simple docstring""" __UpperCAmelCase : List[str] = {str(digit): digit**5 for digit in range(10)} def A ( _A ): """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_A ) ) def A ( ): """simple docstring""" return sum( number for number in range(1_000, 1_000_000 ) if number == digits_fifth_powers_sum(_A ) ) if __name__ == "__main__": print(solution())
import unittest

from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "é", ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "", "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "se", ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "se", ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # All three rows are padded to a common length of 111 (pad id 5, pad token type 3);
        # the runs of padding are written with list arithmetic for readability, values unchanged.
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546,
                 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19,
                 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17,
                 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949,
                 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272,
                 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3],
                [5] * 79 + [322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68,
                            1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3],
                [5] * 97 + [32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3],
            ],
            "token_type_ids": [
                [0] * 110 + [2],
                [3] * 79 + [0] * 31 + [2],
                [3] * 97 + [0] * 13 + [2],
            ],
            "attention_mask": [
                [1] * 111,
                [0] * 79 + [1] * 32,
                [0] * 97 + [1] * 14,
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """
    Return the peak value of lst, assuming the list first increases and then
    decreases, using a divide-and-conquer strategy.
    """
    # choose the middle 3 elements
    m = len(lst) // 2
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
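# Small usage sketch (illustrative, not part of the original file); `peak` expects input
# that rises and then falls:
assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
assert peak([1, 10, 9, 8, 7]) == 10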
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _UpperCamelCase = logging.getLogger(__name__) def _a ( _snake_case=2 , _snake_case=3 , _snake_case=16 , _snake_case = 10 , _snake_case = 2 ): """simple docstring""" def get_dataset(_snake_case ): UpperCAmelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(lowercase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCAmelCase = get_dataset(lowercase__ ) UpperCAmelCase = get_dataset(lowercase__ ) UpperCAmelCase = DataLoader(lowercase__ , shuffle=lowercase__ , batch_size=lowercase__ , num_workers=4 ) UpperCAmelCase = DataLoader(lowercase__ , shuffle=lowercase__ , batch_size=lowercase__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None ): """simple docstring""" UpperCAmelCase = [] for epoch in range(lowercase__ ): # Train quickly model.train() for batch in dataloader: UpperCAmelCase , UpperCAmelCase = batch UpperCAmelCase = model(lowercase__ ) UpperCAmelCase = torch.nn.functional.mse_loss(lowercase__ , lowercase__ ) accelerator.backward(lowercase__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCamelCase__ ( nn.Module ): def __init__( self ): super().__init__() UpperCAmelCase = nn.Parameter(torch.randn(1 ) ) UpperCAmelCase = nn.Parameter(torch.randn(1 ) ) def _UpperCamelCase ( self ,A ): return x * self.a + self.b class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase = DummyModel() UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase , UpperCAmelCase = dummy_dataloaders() UpperCAmelCase = ProjectConfiguration(total_limit=1 ,project_dir=__UpperCamelCase ,automatic_checkpoint_naming=__UpperCamelCase ) # Train baseline UpperCAmelCase = Accelerator(project_config=__UpperCamelCase ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 ) def _UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase = DummyModel() UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase , UpperCAmelCase = dummy_dataloaders() # Train baseline UpperCAmelCase = Accelerator() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Save initial UpperCAmelCase = os.path.join(__UpperCamelCase ,"""initial""" ) accelerator.save_state(__UpperCamelCase ) ((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item() UpperCAmelCase = optimizer.state_dict() UpperCAmelCase = train(3 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), 
model.b.item() UpperCAmelCase = optimizer.state_dict() # Train partially set_seed(42 ) UpperCAmelCase = DummyModel() UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase , UpperCAmelCase = dummy_dataloaders() UpperCAmelCase = Accelerator() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) accelerator.load_state(__UpperCamelCase ) ((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item() UpperCAmelCase = optimizer.state_dict() self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) UpperCAmelCase = train(2 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Save everything UpperCAmelCase = os.path.join(__UpperCamelCase ,"""checkpoint""" ) accelerator.save_state(__UpperCamelCase ) # Load everything back in and make sure all states work accelerator.load_state(__UpperCamelCase ) test_rands += train(1 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item() UpperCAmelCase = optimizer.state_dict() self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase = DummyModel() UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase , UpperCAmelCase = dummy_dataloaders() UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=__UpperCamelCase ) # Train baseline UpperCAmelCase = Accelerator(project_dir=__UpperCamelCase ,project_config=__UpperCamelCase ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Save initial accelerator.save_state() ((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item() UpperCAmelCase = optimizer.state_dict() UpperCAmelCase = train(3 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item() UpperCAmelCase = optimizer.state_dict() # Train partially set_seed(42 ) UpperCAmelCase = DummyModel() UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase , UpperCAmelCase = dummy_dataloaders() UpperCAmelCase = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=__UpperCamelCase ) UpperCAmelCase = Accelerator(project_dir=__UpperCamelCase ,project_config=__UpperCamelCase ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) accelerator.load_state(os.path.join(__UpperCamelCase ,"""checkpoints""" ,"""checkpoint_0""" ) ) ((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item() UpperCAmelCase = optimizer.state_dict() self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) UpperCAmelCase = train(2 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Save everything accelerator.save_state() # Load everything back in and make sure all 
states work accelerator.load_state(os.path.join(__UpperCamelCase ,"""checkpoints""" ,"""checkpoint_1""" ) ) test_rands += train(1 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item() UpperCAmelCase = optimizer.state_dict() self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCamelCase ( self ): UpperCAmelCase = torch.tensor([1, 2, 3] ) UpperCAmelCase = torch.tensor([2, 3, 4] ) UpperCAmelCase = DummyModel() UpperCAmelCase = torch.optim.Adam(net.parameters() ) UpperCAmelCase = Accelerator() with self.assertRaises(__UpperCamelCase ) as ve: accelerator.register_for_checkpointing(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) UpperCAmelCase = str(ve.exception ) self.assertTrue("""Item at index 0""" in message ) self.assertTrue("""Item at index 1""" in message ) self.assertFalse("""Item at index 2""" in message ) self.assertFalse("""Item at index 3""" in message ) def _UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase = DummyModel() UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 ) UpperCAmelCase = torch.optim.lr_scheduler.StepLR(__UpperCamelCase ,step_size=1 ,gamma=0.99 ) UpperCAmelCase , UpperCAmelCase = dummy_dataloaders() UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=__UpperCamelCase ) # Train baseline UpperCAmelCase = Accelerator(project_dir=__UpperCamelCase ,project_config=__UpperCamelCase ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Save initial accelerator.save_state() UpperCAmelCase = scheduler.state_dict() train(3 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) self.assertNotEqual(__UpperCamelCase ,scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(__UpperCamelCase ,"""checkpoints""" ,"""checkpoint_0""" ) ) self.assertEqual(__UpperCamelCase ,scheduler.state_dict() ) def _UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase = DummyModel() UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=__UpperCamelCase ,total_limit=2 ) # Train baseline UpperCAmelCase = Accelerator(project_dir=__UpperCamelCase ,project_config=__UpperCamelCase ) UpperCAmelCase = accelerator.prepare(__UpperCamelCase ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(__UpperCamelCase ,"""checkpoints""" ,"""checkpoint_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase ,"""checkpoints""" ,"""checkpoint_9""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase ,"""checkpoints""" ,"""checkpoint_10""" ) ) ) @require_cuda def _UpperCamelCase ( self ): UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() ) if __name__ == "__main__": _UpperCamelCase = """/tmp/accelerate/state_checkpointing""" _UpperCamelCase = DummyModel() _UpperCamelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3) 
_UpperCamelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _UpperCamelCase , _UpperCamelCase = dummy_dataloaders() _UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _UpperCamelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _UpperCamelCase , _UpperCamelCase = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: _UpperCamelCase = group["""params"""][0].device break assert param_device.type == accelerator.device.type _UpperCamelCase = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""") for group in optimizer.param_groups: _UpperCamelCase = group["""params"""][0].device break assert ( param_device.type == torch.device("""cpu""").type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""") for group in optimizer.param_groups: _UpperCamelCase = group["""params"""][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""): accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
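# Illustrative usage (added for clarity; not part of the original file, and assumes a
# transformers installation that exposes BitConfig publicly):
#
#     from transformers import BitConfig
#     config = BitConfig(layer_type="bottleneck", global_padding="same")
#     assert config.global_padding == "SAME"  # the padding strategy is upper-cased in __init__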
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__) SCREAMING_SNAKE_CASE : List[Any] = """pytorch_model.bin""" @dataclasses.dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =dataclasses.field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} ) lowerCamelCase__ =dataclasses.field( default=__snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'}, ) @dataclasses.dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} ) lowerCamelCase__ =dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} ) lowerCamelCase__ =dataclasses.field( default=__snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} ) lowerCamelCase__ =dataclasses.field( default=__snake_case, metadata={'help': 'The name of the task to train on.'}, ) lowerCamelCase__ =dataclasses.field( default=__snake_case, metadata={'help': 'The list of labels for the task.'} ) @dataclasses.dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =dataclasses.field( metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} ) lowerCamelCase__ =dataclasses.field( default='accuracy', metadata={'help': 'The evaluation metric used for the task.'} ) lowerCamelCase__ =dataclasses.field( default='no', metadata={ 'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' }, ) lowerCamelCase__ =dataclasses.field( default=10, metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'}, ) lowerCamelCase__ =dataclasses.field( default=0.0, metadata={ 'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.' 
}, ) lowerCamelCase__ =dataclasses.field( default=__snake_case, metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'}, ) lowerCamelCase__ =dataclasses.field( default=__snake_case, metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'}, ) lowerCamelCase__ =dataclasses.field( default=__snake_case, metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'}, ) lowerCamelCase__ =dataclasses.field( default=0.0, metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'}, ) lowerCamelCase__ =dataclasses.field( default=100, metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'}, ) lowerCamelCase__ =dataclasses.field( default=__snake_case, metadata={'help': 'Random seed for initialization.'}, ) def lowercase ( _snake_case : List[str] , _snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : int , _snake_case : Any ) ->Dict: """simple docstring""" __snake_case : List[str] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: __snake_case : int = dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 __snake_case : Union[str, Any] = int(eval_result * len(_snake_case ) ) print(_snake_case ) __snake_case : int = dataset.sort('''probability''' , reverse=_snake_case ) __snake_case : Any = dataset.select(range(_snake_case ) ) __snake_case : int = dataset.remove_columns(['''label''', '''probability'''] ) __snake_case : List[Any] = dataset.rename_column('''prediction''' , '''label''' ) __snake_case : str = dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} ) __snake_case : List[Any] = dataset.shuffle(seed=args.seed ) __snake_case : List[str] = os.path.join(_snake_case , f"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(_snake_case , index=_snake_case ) else: dataset.to_json(_snake_case ) def lowercase ( _snake_case : Tuple , _snake_case : List[str] , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : int = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() __snake_case : str = STModelArguments(model_name_or_path=_snake_case ) __snake_case : List[str] = STDataArguments(train_file=_snake_case , infer_file=_snake_case ) __snake_case : Union[str, Any] = STTrainingArguments(output_dir=_snake_case ) __snake_case : Optional[Any] = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(_snake_case ).items(): setattr(_snake_case , _snake_case , _snake_case ) for key, value in kwargs.items(): if hasattr(_snake_case , _snake_case ): setattr(_snake_case , _snake_case , _snake_case ) # Sanity checks __snake_case : Optional[int] = {} __snake_case : List[str] = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None __snake_case : Tuple = args.train_file __snake_case : List[Any] = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None __snake_case : List[str] = args.eval_file for key in data_files: __snake_case : str = data_files[key].split('''.''' )[-1] assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: __snake_case : Tuple = extension else: assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed ) logger.info('''Creating the initial data directory for self-training...''' ) __snake_case : List[str] = f"""{args.output_dir}/self-train_iter-{{}}""".format __snake_case : Dict = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=_snake_case ) os.makedirs(_snake_case , exist_ok=_snake_case ) accelerator.wait_for_everyone() __snake_case : Any = None __snake_case : Union[str, Any] = None __snake_case : Dict = 0 __snake_case : Optional[int] = False # Show the progress bar __snake_case : Tuple = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): __snake_case : int = data_dir_format(_snake_case ) assert os.path.exists(_snake_case ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 __snake_case : Tuple = os.path.join(_snake_case , '''stage-1''' ) __snake_case : List[str] = { '''accelerator''': accelerator, '''model_name_or_path''': args.model_name_or_path, '''cache_dir''': args.cache_dir, '''do_train''': True, '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''], '''do_eval''': True if args.eval_file is not None else False, '''eval_file''': data_files['''eval'''], '''do_predict''': True, '''infer_file''': data_files['''infer'''], '''task_name''': args.task_name, '''label_list''': args.label_list, '''output_dir''': current_output_dir, '''eval_metric''': args.eval_metric, '''evaluation_strategy''': args.evaluation_strategy, '''early_stopping_patience''': args.early_stopping_patience, '''early_stopping_threshold''': args.early_stopping_threshold, '''seed''': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(_snake_case , _snake_case ): arguments_dict.update({key: value} ) __snake_case : int = os.path.join(_snake_case , '''best-checkpoint''' , _snake_case ) if os.path.exists(_snake_case ): logger.info( '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , _snake_case , _snake_case , ) else: logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , _snake_case ) finetune(**_snake_case ) accelerator.wait_for_everyone() assert os.path.exists(_snake_case ) logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , _snake_case ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data __snake_case : List[str] = os.path.join(_snake_case , '''best-checkpoint''' ) __snake_case : List[str] = os.path.join(_snake_case , '''stage-2''' ) # Update arguments_dict __snake_case : Union[str, Any] = model_path __snake_case : List[str] = data_files['''train'''] __snake_case : Dict = current_output_dir __snake_case : Optional[Any] = os.path.join(_snake_case , '''best-checkpoint''' , _snake_case ) if os.path.exists(_snake_case ): logger.info( '''Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 2.''' , _snake_case , _snake_case , ) else: logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , _snake_case ) finetune(**_snake_case ) accelerator.wait_for_everyone() assert os.path.exists(_snake_case ) logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , _snake_case ) __snake_case : Tuple = iteration __snake_case : Any = data_dir_format(iteration + 1 ) __snake_case : Optional[Any] = AutoConfig.from_pretrained(os.path.join(_snake_case , '''best-checkpoint''' ) ) __snake_case : int = config.idalabel __snake_case : Union[str, Any] = os.path.join(_snake_case , '''eval_results_best-checkpoint.json''' ) __snake_case : Tuple = os.path.join(_snake_case , '''test_results_best-checkpoint.json''' ) assert os.path.exists(_snake_case ) with open(_snake_case , '''r''' ) as f: __snake_case : Optional[Any] = float(json.load(_snake_case )[args.eval_metric] ) __snake_case : Optional[int] = os.path.join(_snake_case , '''infer_output_best-checkpoint.csv''' ) assert os.path.exists(_snake_case ) # Loading the dataset from local csv or json files. __snake_case : Dict = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data'''] __snake_case : Tuple = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data'''] if accelerator.is_main_process: os.makedirs(_snake_case , exist_ok=_snake_case ) shutil.copy(_snake_case , os.path.join(_snake_case , f"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(_snake_case ): shutil.copy(_snake_case , os.path.join(_snake_case , f"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) accelerator.wait_for_everyone() __snake_case : Union[str, Any] = os.path.join(_snake_case , f"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: __snake_case : str = eval_result if best_iteration is None: __snake_case : Dict = new_iteration __snake_case : Optional[int] = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: __snake_case : Dict = new_iteration __snake_case : Any = new_eval_result __snake_case : Union[str, Any] = 0 else: if new_eval_result == best_eval_result: __snake_case : Any = new_iteration __snake_case : List[str] = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: __snake_case : str = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('''Best iteration: %d''' , _snake_case ) logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_snake_case , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(_snake_case , '''eval_results_best-iteration.json''' ) , ) else: # Assume that the last iteration is the best logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 ) logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_snake_case , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(_snake_case , '''eval_results_best-iteration.json''' ) , )
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil SCREAMING_SNAKE_CASE : str = 100 SCREAMING_SNAKE_CASE : str = set(range(3, NUM_PRIMES, 2)) primes.add(2) SCREAMING_SNAKE_CASE : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def lowercase ( _snake_case : int ) ->set[int]: """simple docstring""" if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} __snake_case : set[int] = set() __snake_case : int __snake_case : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def lowercase ( _snake_case : int = 5_000 ) ->int | None: """simple docstring""" for number_to_partition in range(1 , _snake_case ): if len(partition(_snake_case ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F'{solution() = }')
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Return the maximum saving from pruning the network down to its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
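# A minimal sketch of the Graph/prims_algorithm API (illustrative; not part of the
# original file): a triangle where dropping the heaviest edge yields the MST.
_g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 4})
_mst = _g.prims_algorithm()
assert sum(_mst.edges.values()) == 3  # the MST keeps edges (0, 1) and (1, 2)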
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig'] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['RemBertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['RemBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertLayer', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertLayer', 'TFRemBertModel', 'TFRemBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from math import factorial def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ): '''simple docstring''' return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack = Stack()
    operator_stack: Stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
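# Because the operand popped first is the right-hand operand, non-commutative
# operators still evaluate left-to-right; a small check (a sketch):
assert dijkstras_two_stack_algorithm("(9 - (3 - 1))") == 7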
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
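# Note: the random-pair-swap variant above does not produce a perfectly uniform
# permutation. The classical Fisher-Yates shuffle walks the list once, swapping
# each position with a uniformly chosen earlier-or-equal index; a minimal sketch:
def fisher_yates_shuffle_uniform(data: list) -> list:
    # Uniform over all len(data)! permutations.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data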
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
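# A sketch of how such parameter sets are typically used: a pipeline test can
# assert that a pipeline's __call__ signature covers the expected arguments.
# The helper and the inline frozenset below are illustrative, not library API.
import inspect


def check_pipeline_signature(pipeline_class, expected_params=frozenset({"prompt", "height", "width", "guidance_scale"})):
    # Every expected argument should appear in the pipeline's __call__ signature.
    accepted = set(inspect.signature(pipeline_class.__call__).parameters)
    missing = set(expected_params) - accepted
    assert not missing, f"missing call arguments: {missing}"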
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
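# Quick check of the sieve on a small bound:
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]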
"""simple docstring""" def A_ ( lowercase ) -> None: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = generate_pascal_triangle(lowercase ) for row_idx in range(lowercase ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def A_ ( lowercase ) -> list[list[int]]: """simple docstring""" if not isinstance(lowercase , lowercase ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) UpperCAmelCase_ : list[list[int]] = [] for current_row_idx in range(lowercase ): UpperCAmelCase_ : Optional[Any] = populate_current_row(lowercase , lowercase ) triangle.append(lowercase ) return triangle def A_ ( lowercase , lowercase ) -> list[int]: """simple docstring""" UpperCAmelCase_ : List[Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 UpperCAmelCase_ ,UpperCAmelCase_ : List[Any] = 1, 1 for current_col_idx in range(1 , lowercase ): calculate_current_element( lowercase , lowercase , lowercase , lowercase ) return current_row def A_ ( lowercase , lowercase , lowercase , lowercase , ) -> None: """simple docstring""" UpperCAmelCase_ : str = triangle[current_row_idx - 1][current_col_idx - 1] UpperCAmelCase_ : int = triangle[current_row_idx - 1][current_col_idx] UpperCAmelCase_ : Any = above_to_left_elt + above_to_right_elt def A_ ( lowercase ) -> list[list[int]]: """simple docstring""" if not isinstance(lowercase , lowercase ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) UpperCAmelCase_ : list[list[int]] = [[1]] for row_index in range(1 , lowercase ): UpperCAmelCase_ : Any = [0] + result[-1] + [0] UpperCAmelCase_ : Union[str, Any] = row_index + 1 # Calculate the number of distinct elements in a row UpperCAmelCase_ : Dict = sum(divmod(lowercase , 2 ) ) UpperCAmelCase_ : List[str] = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] UpperCAmelCase_ : Union[str, Any] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() UpperCAmelCase_ : int = row_first_half + row_second_half result.append(lowercase ) return result def A_ ( ) -> None: """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowercase , lowercase ) -> None: UpperCAmelCase_ : int = f'''{func.__name__}({value})''' UpperCAmelCase_ : int = timeit(f'''__main__.{call}''' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(f'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowercase , lowercase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowercase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''The column name of the images in the files.'''} ) _lowerCamelCase: Optional[str] = field(default=_lowercase , metadata={'''help''': '''A folder containing the training data.'''} ) _lowerCamelCase: Optional[str] = field(default=_lowercase , metadata={'''help''': '''A folder containing the validation data.'''} ) _lowerCamelCase: Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) _lowerCamelCase: Optional[int] = field( default=_lowercase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) _lowerCamelCase: Optional[int] = field( default=_lowercase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: A = {} if self.train_dir is not None: A = self.train_dir if self.validation_dir is not None: A = self.validation_dir A = data_files if data_files else None @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = field( default=_lowercase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) _lowerCamelCase: str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) _lowerCamelCase: str = field(default=_lowercase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) _lowerCamelCase: float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( snake_case__ : Union[str, Any] ): A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , snake_case__ , snake_case__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A = training_args.get_process_log_level() logger.setLevel(snake_case__ ) transformers.utils.logging.set_verbosity(snake_case__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. 
A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0: A = ds['train'].train_test_split(data_args.train_val_split ) A = split['train'] A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: A = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case__ ) elif model_args.model_name_or_path: A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ ) else: A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case__ ) elif model_args.model_name_or_path: A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case__ ) else: A = ViTImageProcessor() # create model if model_args.model_name_or_path: A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) A = ViTMAEForPreTraining(snake_case__ ) if training_args.do_train: A = ds['train'].column_names else: A = ds['validation'].column_names if data_args.image_column_name is not None: A = data_args.image_column_name elif "image" in column_names: A = 'image' elif "img" in column_names: A = 'img' else: A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: A = image_processor.size['shortest_edge'] else: A = (image_processor.size['height'], image_processor.size['width']) A = Compose( [ Lambda(lambda snake_case__ : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(snake_case__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(snake_case__ : Dict ): A = [transforms(snake_case__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case__ ) # Compute absolute learning rate A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: A = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer A = Trainer( model=snake_case__ , args=snake_case__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , ) # Training if training_args.do_train: A = None if training_args.resume_from_checkpoint is not None: A = training_args.resume_from_checkpoint elif last_checkpoint is not None: A = last_checkpoint A = trainer.train(resume_from_checkpoint=snake_case__ ) trainer.save_model() trainer.log_metrics('train' , 
train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: A = trainer.evaluate() trainer.log_metrics('eval' , snake_case__ ) trainer.save_metrics('eval' , snake_case__ ) # Write model card and (optionally) push to hub A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case__ ) else: trainer.create_model_card(**snake_case__ ) def _snake_case ( snake_case__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
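# A plausible command line for the script above (dataset, output path, and
# hyperparameters are illustrative, not prescriptive):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75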
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _lowercase = 8 def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ): A = x.device A = (x * 255).int().clamp(0 , 255 ) A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' ) A = ((x & mask) != 0).float() A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' ) A = bits * 2 - 1 return bits def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ): A = x.device A = (x > 0).int() A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa ) A = rearrange(snake_case__ , 'd -> d 1 1' ) A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 ) A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) A = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas A = self.alphas_cumprod[timestep] A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod A = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) A = self._get_variance(snake_case__ , snake_case__ ) A = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu' A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ ) A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise A = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ): A = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 ) else: A = None # 1. compute alphas, betas A = self.alphas_cumprod[t] A = self.alphas_cumprod[t - 1] if t > 0 else self.one A = 1 - alpha_prod_t A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": A = model_output else: raise ValueError(F'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" A = self.bit_scale if self.config.clip_sample: A = torch.clamp(snake_case__ , -scale , snake_case__ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise A = 0 if t > 0: A = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device ) A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]: super().__init__() A = bit_scale A = ( ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]: A = torch.randn( (batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,) A = decimal_to_bits(A_ ) * self.bit_scale A = latents.to(self.device ) self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual A = self.unet(A_ ,A_ ).sample # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = bits_to_decimal(A_ ) if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
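# Usage sketch for the pipeline above (kept as a comment because the exact UNet
# configuration is illustrative; a bit-diffusion UNet needs channels = image
# channels x bits, e.g. 3 x 8 = 24, and must accept unconditional calls):
#
#   unet = ...  # a UNet trained on 24-channel bit tensors
#   scheduler = DDIMScheduler(num_train_timesteps=1000)
#   pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
#   images = pipe(height=64, width=64, num_inference_steps=50).images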
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """simple docstring"""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
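# With python-fire, the function doubles as a CLI; a plausible invocation
# (the script filename, language pair, and paths are illustrative):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir ./wmt16-ro-en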
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
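# Usage sketch for the real upscaler checkpoint exercised above (prompt and
# input image are illustrative; requires a GPU with enough memory):
#
#   pipeline = StableDiffusionUpscalePipeline.from_pretrained(
#       "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#   ).to("cuda")
#   upscaled = pipeline(prompt="a cat sitting on a park bench", image=low_res_image).images[0]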
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase_ : def __init__( self : Tuple , __A : List[Any] , __A : List[Any]=13 , __A : List[str]=30 , __A : Optional[int]=2 , __A : Optional[Any]=3 , __A : Optional[Any]=True , __A : List[Any]=True , __A : Dict=32 , __A : List[str]=5 , __A : Tuple=4 , __A : Dict=37 , __A : List[Any]="gelu" , __A : str=0.1 , __A : Dict=0.1 , __A : Optional[Any]=10 , __A : Optional[int]=0.0_2 , __A : int=3 , __A : Union[str, Any]=None , __A : Union[str, Any]=2 , ): __A : List[Any] = parent __A : List[Any] = batch_size __A : List[str] = image_size __A : Optional[Any] = patch_size __A : Union[str, Any] = num_channels __A : Union[str, Any] = is_training __A : Any = use_labels __A : Any = hidden_size __A : str = num_hidden_layers __A : List[Any] = num_attention_heads __A : Optional[Any] = intermediate_size __A : Dict = hidden_act __A : str = hidden_dropout_prob __A : Optional[int] = attention_probs_dropout_prob __A : Union[str, Any] = type_sequence_label_size __A : Union[str, Any] = initializer_range __A : Optional[int] = scope __A : List[str] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __A : Tuple = (image_size // patch_size) ** 2 __A : int = num_patches + 2 def lowerCAmelCase_ ( self : Dict ): __A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __A : List[str] = None if self.use_labels: __A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __A : str = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : str ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self : str , __A : Dict , __A : List[str] , __A : Any ): __A : Any = DeiTModel(config=__A ) model.to(__A ) model.eval() __A : Optional[int] = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : str , __A : Dict , __A : int , __A : Tuple ): __A : List[str] = 
DeiTForMaskedImageModeling(config=__A ) model.to(__A ) model.eval() __A : int = model(__A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __A : Dict = 1 __A : Tuple = DeiTForMaskedImageModeling(__A ) model.to(__A ) model.eval() __A : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __A : Union[str, Any] = model(__A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self : int , __A : int , __A : Any , __A : List[Any] ): __A : Union[str, Any] = self.type_sequence_label_size __A : Optional[Any] = DeiTForImageClassification(__A ) model.to(__A ) model.eval() __A : Union[str, Any] = model(__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __A : Optional[int] = 1 __A : Union[str, Any] = DeiTForImageClassification(__A ) model.to(__A ) model.eval() __A : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __A : Optional[Any] = model(__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self : int ): __A : Optional[Any] = self.prepare_config_and_inputs() ( ( __A ) , ( __A ) , ( __A ) , ) : Optional[int] = config_and_inputs __A : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( _lowercase , _lowercase , unittest.TestCase ): _lowercase : Optional[int] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _lowercase : Dict = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _lowercase : Any = False _lowercase : int = False _lowercase : Optional[int] = False def lowerCAmelCase_ ( self : str ): __A : Dict = DeiTModelTester(self ) __A : int = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 ) def lowerCAmelCase_ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def lowerCAmelCase_ ( self : Any ): pass def lowerCAmelCase_ ( self : int ): __A , __A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : Any = model_class(__A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __A : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , nn.Linear ) ) def lowerCAmelCase_ ( self : str ): __A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A : Optional[Any] = model_class(__A ) __A : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A : Dict = [*signature.parameters.keys()] __A : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __A ) def lowerCAmelCase_ ( self : Any ): __A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def lowerCAmelCase_ ( self : Tuple ): __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*__A ) def lowerCAmelCase_ ( self : str ): __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def lowerCAmelCase_ ( self : Optional[Any] , __A : int , __A : Dict , __A : Optional[Any]=False ): __A : Any = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase_ ( self : Optional[int] ): if not self.model_tester.is_training: return __A , __A : int = self.model_tester.prepare_config_and_inputs_for_common() __A : Any = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__A ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue __A : List[str] = model_class(__A ) model.to(__A ) model.train() __A : Dict = self._prepare_for_class(__A , __A , return_labels=__A ) __A : Any = model(**__A ).loss loss.backward() def lowerCAmelCase_ ( self : Tuple ): __A , __A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __A : Optional[Any] = False __A : List[Any] = True for model_class in self.all_model_classes: if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue __A : Any = model_class(__A ) model.gradient_checkpointing_enable() model.to(__A ) model.train() __A : Union[str, Any] = self._prepare_for_class(__A , __A , return_labels=__A ) __A : int = model(**__A ).loss loss.backward() def lowerCAmelCase_ ( self : List[Any] ): __A , __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __A : str = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__A ), *get_values(__A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): __A : Tuple = problem_type["""title"""] __A : Optional[int] = problem_type["""num_labels"""] __A : Union[str, Any] = model_class(__A ) model.to(__A ) model.train() __A : int = self._prepare_for_class(__A , __A , return_labels=__A ) if problem_type["num_labels"] > 1: __A : int = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) __A : Optional[Any] = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__A ) as warning_list: __A : List[str] = model(**__A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def lowerCAmelCase_ ( self : Union[str, Any] ): for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A : Optional[Any] = DeiTModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __SCREAMING_SNAKE_CASE ( ) -> Any: __A : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowerCAmelCase_ ( self : List[Any] ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self : Optional[int] ): __A : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( __A ) __A : int = self.default_image_processor __A : List[str] = prepare_img() __A : Optional[Any] = image_processor(images=__A , return_tensors="""pt""" ).to(__A ) # forward pass with torch.no_grad(): __A : int = model(**__A ) # verify the logits __A : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __A ) __A : str = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def lowerCAmelCase_ ( self : str ): __A : List[str] = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" ) __A : Union[str, Any] = self.default_image_processor __A : Any = prepare_img() __A : List[str] = image_processor(images=__A , return_tensors="""pt""" ) __A : List[Any] = inputs.pixel_values.to(__A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __A : Tuple = model(__A )
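# A standalone sketch (illustrative helper name, not part of the test suite) of the
# sequence-length arithmetic the DeiT tester above encodes: DeiT prepends a [CLS]
# and a distillation token to the patch sequence, hence the "+ 2".
def deit_seq_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 2  # patches + [CLS] + distillation token


assert deit_seq_length(30, 2) == 227    # the tester defaults above
assert deit_seq_length(224, 16) == 198  # a 224x224 input with 16x16 patches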
from __future__ import annotations

from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Count the ways needed_sum can be written as a sum of distinct natural-number powers."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If taking the current power keeps us within needed_sum, try including it.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # Also try skipping the current power and moving on to the next number.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solution(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
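# Quick hand-checked usage of the counter above: 13 = 2**2 + 3**2 is the only way
# to write 13 as a sum of distinct squares, while 100 admits three
# (10**2; 6**2 + 8**2; 1 + 9 + 16 + 25 + 49).
if __name__ == "__main__":
    assert solution(13, 2) == 1
    assert solution(100, 2) == 3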
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _lowerCamelCase : Union[str, Any] = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase : """simple docstring""" UpperCAmelCase_ = PegasusConfig UpperCAmelCase_ = {} UpperCAmelCase_ = "gelu" def __init__( self : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any=1_3, _UpperCAmelCase : Optional[int]=7, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : Optional[Any]=False, _UpperCAmelCase : str=9_9, _UpperCAmelCase : List[str]=3_2, _UpperCAmelCase : Tuple=5, _UpperCAmelCase : str=4, _UpperCAmelCase : Dict=3_7, _UpperCAmelCase : int=0.1, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : Any=2_0, _UpperCAmelCase : Any=2, _UpperCAmelCase : List[Any]=1, _UpperCAmelCase : Union[str, Any]=0, ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = parent SCREAMING_SNAKE_CASE__ : List[Any] = batch_size SCREAMING_SNAKE_CASE__ : Any = seq_length SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : int = use_labels SCREAMING_SNAKE_CASE__ : List[str] = vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers SCREAMING_SNAKE_CASE__ : int = num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[int] = eos_token_id SCREAMING_SNAKE_CASE__ : Union[str, Any] = pad_token_id SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id def A_ ( self : int ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ).clip(3, self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ), 1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.concatenate([input_ids, eos_tensor], axis=1 ) SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE__ : str = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_pegasus_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) return config, 
inputs_dict def A_ ( self : Union[str, Any], _UpperCAmelCase : int, _UpperCAmelCase : Optional[int], _UpperCAmelCase : List[str] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = 2_0 SCREAMING_SNAKE_CASE__ : List[str] = model_class_name(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = model.encode(inputs_dict["input_ids"] ) SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.init_cache(decoder_input_ids.shape[0], _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4" ) SCREAMING_SNAKE_CASE__ : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) SCREAMING_SNAKE_CASE__ : str = model.decode( decoder_input_ids[:, :-1], _UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase, decoder_position_ids=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4" ) SCREAMING_SNAKE_CASE__ : List[str] = model.decode( decoder_input_ids[:, -1:], _UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase, past_key_values=outputs_cache.past_key_values, decoder_position_ids=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Tuple = model.decode(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' ) def A_ ( self : Union[str, Any], _UpperCAmelCase : List[str], _UpperCAmelCase : str, _UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = 2_0 SCREAMING_SNAKE_CASE__ : Tuple = model_class_name(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = model.encode(inputs_dict["input_ids"] ) SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Tuple = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) SCREAMING_SNAKE_CASE__ : Dict = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ], axis=-1, ) SCREAMING_SNAKE_CASE__ : List[Any] = model.init_cache(decoder_input_ids.shape[0], _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) SCREAMING_SNAKE_CASE__ : Any = model.decode( decoder_input_ids[:, :-1], _UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase, decoder_position_ids=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4" ) SCREAMING_SNAKE_CASE__ : str = model.decode( decoder_input_ids[:, -1:], _UpperCAmelCase, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=_UpperCAmelCase, decoder_position_ids=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : List[str] = model.decode(_UpperCAmelCase, _UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' ) def _a ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : 
Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=None , ) -> Optional[Any]: '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = np.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ : Any = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase (__lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) UpperCAmelCase_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () UpperCAmelCase_ = True UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = FlaxPegasusModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ConfigTester(self, config_class=_UpperCAmelCase ) def A_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def A_ ( self : Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) def A_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) def A_ ( self : Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase ) @jax.jit def encode_jitted(_UpperCAmelCase : str, _UpperCAmelCase : Dict=None, **_UpperCAmelCase : Optional[Any] ): return model.encode(input_ids=_UpperCAmelCase, attention_mask=_UpperCAmelCase ) with self.subTest("JIT Enabled" ): SCREAMING_SNAKE_CASE__ : Any = encode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): SCREAMING_SNAKE_CASE__ : Optional[Any] = encode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase, _UpperCAmelCase ): self.assertEqual(jitted_output.shape, output.shape ) def A_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = 
model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"] ) SCREAMING_SNAKE_CASE__ : Any = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(_UpperCAmelCase : List[str], _UpperCAmelCase : Any, _UpperCAmelCase : Tuple ): return model.decode( decoder_input_ids=_UpperCAmelCase, decoder_attention_mask=_UpperCAmelCase, encoder_outputs=_UpperCAmelCase, ) with self.subTest("JIT Enabled" ): SCREAMING_SNAKE_CASE__ : int = decode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): SCREAMING_SNAKE_CASE__ : str = decode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase, _UpperCAmelCase ): self.assertEqual(jitted_output.shape, output.shape ) @slow def A_ ( self : Optional[Any] ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class_name.from_pretrained("google/pegasus-large", from_pt=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = np.ones((1, 1) ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @slow def A_ ( self : str ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) SCREAMING_SNAKE_CASE__ : int = PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) SCREAMING_SNAKE_CASE__ : List[Any] = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] SCREAMING_SNAKE_CASE__ : Tuple = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(_UpperCAmelCase, return_tensors="np", truncation=_UpperCAmelCase, max_length=5_1_2, padding=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = model.generate(**_UpperCAmelCase, num_beams=2 ).sequences SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase ) assert tgt_text == decoded
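# A minimal sketch (requires jax; the helper name is illustrative) of the
# attention-mask / position-id bookkeeping the cache tests above perform before
# calling model.decode with past_key_values:
import jax.numpy as jnp


def cache_decode_inputs(decoder_input_ids, max_decoder_length):
    batch, seq = decoder_input_ids.shape
    # attend to the real tokens, zero out the unused tail of the cache window
    decoder_attention_mask = jnp.concatenate(
        [jnp.ones((batch, seq)), jnp.zeros((batch, max_decoder_length - seq))], axis=-1
    )
    decoder_position_ids = jnp.broadcast_to(jnp.arange(seq)[None, :], (batch, seq))
    return decoder_attention_mask, decoder_position_ids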
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when sqrt(4*n + 1)/2 + 1/2 is an integral power of two."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
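# A small hand-checked illustration of the helper above:
# for n = 2, sqrt(4*2 + 1)/2 + 1/2 == 2 and log2(2) == 1, so the check passes.
if __name__ == "__main__":
    assert check_partition_perfect(2)
    assert not check_partition_perfect(3)  # sqrt(13)/2 + 1/2 is not a power of two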
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
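# Typical invocation (a sketch; the training script and its flags are illustrative):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# The launcher imports run_glue.py as a module and calls its `_mp_fn` once per core,
# which is why the target script must define an `_mp_fn(index)` entry point.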
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (a sympy expression in `variable`) via Newton-Raphson."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
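# For contrast, a dependency-free sketch of the same recurrence
# x_{n+1} = x_n - f(x_n) / f'(x_n), with the derivative supplied by hand:
def newton_sqrt2_demo() -> float:
    def f(x: float) -> float:
        return x**2 - 2  # sqrt(2) is a root

    def df(x: float) -> float:
        return 2 * x

    x = 1.0
    for _ in range(20):
        x -= f(x) / df(x)
    return x  # converges to 1.4142135623...


if __name__ == "__main__":
    assert abs(newton_sqrt2_demo() - 2**0.5) < 1e-10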
"""simple docstring""" from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class _SCREAMING_SNAKE_CASE ( A__ , A__ ): @register_to_config def __init__( self , __A , __A = None , __A = None ) -> str: super().__init__() lowerCAmelCase_ :List[Any] = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" lowerCAmelCase_ :Tuple = torch.zeros(__A , __A ) else: lowerCAmelCase_ :Dict = None lowerCAmelCase_ :List[Any] = torch.nn.Parameter(__A ) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :VQModel UpperCAmelCase_ :CLIPTextModel UpperCAmelCase_ :CLIPTokenizer UpperCAmelCase_ :TransformeraDModel UpperCAmelCase_ :LearnedClassifierFreeSamplingEmbeddings UpperCAmelCase_ :VQDiffusionScheduler def __init__( self , __A , __A , __A , __A , __A , __A , ) -> Optional[Any]: super().__init__() self.register_modules( vqvae=__A , transformer=__A , text_encoder=__A , tokenizer=__A , scheduler=__A , learned_classifier_free_sampling_embeddings=__A , ) def __lowerCAmelCase ( self , __A , __A , __A ) -> List[str]: lowerCAmelCase_ :List[str] = len(__A ) if isinstance(__A , __A ) else 1 # get prompt text embeddings lowerCAmelCase_ :Any = self.tokenizer( __A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) lowerCAmelCase_ :Optional[int] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowerCAmelCase_ :Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) lowerCAmelCase_ :Tuple = text_input_ids[:, : self.tokenizer.model_max_length] lowerCAmelCase_ :Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 lowerCAmelCase_ :List[str] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__A ) # duplicate text embeddings for each generation per prompt lowerCAmelCase_ :Optional[int] = prompt_embeds.repeat_interleave(__A , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: lowerCAmelCase_ :int = self.learned_classifier_free_sampling_embeddings.embeddings lowerCAmelCase_ :Any = negative_prompt_embeds.unsqueeze(0 ).repeat(__A , 1 , 1 ) else: lowerCAmelCase_ :List[str] = [""""""] * batch_size lowerCAmelCase_ :Union[str, Any] = text_input_ids.shape[-1] lowerCAmelCase_ :Union[str, Any] = self.tokenizer( __A , padding="""max_length""" , max_length=__A , truncation=__A , return_tensors="""pt""" , ) lowerCAmelCase_ :List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings lowerCAmelCase_ :Union[str, Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__A ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowerCAmelCase_ :str = negative_prompt_embeds.shape[1] lowerCAmelCase_ :Union[str, Any] = negative_prompt_embeds.repeat(1 , __A , 1 ) lowerCAmelCase_ :Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCAmelCase_ :Optional[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , __A , __A = 100 , __A = 5.0 , __A = 1.0 , __A = 1 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , ) -> Union[ImagePipelineOutput, Tuple]: if isinstance(__A , __A ): lowerCAmelCase_ :Optional[Any] = 1 elif isinstance(__A , __A ): lowerCAmelCase_ :str = len(__A ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__A )}""" ) lowerCAmelCase_ :List[str] = batch_size * num_images_per_prompt lowerCAmelCase_ :str = guidance_scale > 1.0 lowerCAmelCase_ :Optional[Any] = self._encode_prompt(__A , __A , __A ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__A )}.""" ) # get the initial completely masked latents unless the user supplied it lowerCAmelCase_ :str = (batch_size, self.transformer.num_latent_pixels) if latents is None: lowerCAmelCase_ :Any = self.transformer.num_vector_embeds - 1 lowerCAmelCase_ :Tuple = torch.full(__A , __A ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( """Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,""" f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" ) lowerCAmelCase_ :Optional[Any] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__A , device=self.device ) lowerCAmelCase_ :int = self.scheduler.timesteps.to(self.device ) lowerCAmelCase_ :Tuple = latents for i, t in enumerate(self.progress_bar(__A ) ): # expand the sample if we are doing classifier free guidance lowerCAmelCase_ :Optional[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` lowerCAmelCase_ :Optional[int] = self.transformer(__A , encoder_hidden_states=__A , timestep=__A ).sample if do_classifier_free_guidance: lowerCAmelCase_ , lowerCAmelCase_ :int = model_output.chunk(2 ) lowerCAmelCase_ :Tuple = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__A , dim=1 , keepdim=__A ) lowerCAmelCase_ :Optional[Any] = self.truncate(__A , __A ) # remove `log(0)`'s (`-inf`s) lowerCAmelCase_ :List[Any] = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase_ :Optional[int] = self.scheduler.step(__A , timestep=__A , sample=__A , generator=__A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__A , __A , __A ) lowerCAmelCase_ :List[Any] = self.vqvae.config.vq_embed_dim lowerCAmelCase_ :Tuple = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) lowerCAmelCase_ :Optional[Any] = self.vqvae.quantize.get_codebook_entry(__A , shape=__A ) lowerCAmelCase_ :Union[str, Any] = self.vqvae.decode(__A , force_not_quantize=__A ).sample lowerCAmelCase_ :Any = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase_ :Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase_ :Dict = self.numpy_to_pil(__A ) if not return_dict: return (image,) return ImagePipelineOutput(images=__A ) def __lowerCAmelCase ( self , __A , __A ) -> torch.FloatTensor: lowerCAmelCase_ , lowerCAmelCase_ :int = torch.sort(__A , 1 , descending=__A ) lowerCAmelCase_ :Tuple = torch.exp(__A ) lowerCAmelCase_ :List[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out lowerCAmelCase_ :List[Any] = torch.full_like(keep_mask[:, 0:1, :] , __A ) lowerCAmelCase_ :Any = torch.cat((all_true, keep_mask) , dim=1 ) lowerCAmelCase_ :List[str] = keep_mask[:, :-1, :] lowerCAmelCase_ :Any = keep_mask.gather(1 , indices.argsort(1 ) ) lowerCAmelCase_ :Union[str, Any] = log_p_x_0.clone() lowerCAmelCase_ :Optional[int] = -torch.inf # -inf = log(0) return rv
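# A hedged end-to-end sketch of driving this pipeline through diffusers; the
# checkpoint id and prompt below are illustrative assumptions, not taken from
# this file:
#
#     from diffusers import VQDiffusionPipeline
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
#     image.save("teddy_bear.png")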
"""simple docstring""" import numpy as np def _snake_case ( lowercase__ : np.ndarray ) -> np.ndarray: '''simple docstring''' return 1 / (1 + np.exp(-vector )) def _snake_case ( lowercase__ : np.ndarray ) -> np.ndarray: '''simple docstring''' return vector * sigmoid(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder __lowerCAmelCase : List[str] = '''__DUMMY_TRANSFORMERS_USER__''' __lowerCAmelCase : Dict = '''Dummy User''' __lowerCAmelCase : Dict = '''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''' __lowerCAmelCase : Dict = '''https://hub-ci.huggingface.co''' __lowerCAmelCase : List[str] = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' __lowerCAmelCase : int = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' __lowerCAmelCase : Optional[Any] = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ): '''simple docstring''' monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCamelCase ) @pytest.fixture def __lowerCAmelCase ( __UpperCamelCase : Tuple ): '''simple docstring''' monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCamelCase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCamelCase ) @pytest.fixture def __lowerCAmelCase ( __UpperCamelCase : Tuple ): '''simple docstring''' monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCamelCase ) @pytest.fixture def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ): '''simple docstring''' HfFolder.save_token(__UpperCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __lowerCAmelCase ( ): '''simple docstring''' return HfApi(endpoint=__UpperCamelCase ) @pytest.fixture(scope="""session""" ) def __lowerCAmelCase ( __UpperCamelCase : HfApi ): '''simple docstring''' snake_case_ : List[Any] = HfFolder.get_token() HfFolder.save_token(__UpperCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__UpperCamelCase ) @pytest.fixture def __lowerCAmelCase ( __UpperCamelCase : str ): '''simple docstring''' def _cleanup_repo(__UpperCamelCase : int ): hf_api.delete_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __lowerCAmelCase ( __UpperCamelCase : str ): '''simple docstring''' @contextmanager def _temporary_repo(__UpperCamelCase : int ): try: yield repo_id finally: cleanup_repo(__UpperCamelCase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __lowerCAmelCase ( __UpperCamelCase : HfApi , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] ): '''simple docstring''' snake_case_ : List[str] = F'repo_txt_data-{int(time.time() * 1_0E3 )}' snake_case_ : Optional[int] = F'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" , private=__UpperCamelCase ) hf_api.upload_file( token=__UpperCamelCase , path_or_fileobj=str(__UpperCamelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ): '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCAmelCase ( __UpperCamelCase : HfApi , __UpperCamelCase : str , 
__UpperCamelCase : str ): '''simple docstring''' snake_case_ : Optional[Any] = F'repo_zipped_txt_data-{int(time.time() * 1_0E3 )}' snake_case_ : Tuple = F'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" , private=__UpperCamelCase ) hf_api.upload_file( token=__UpperCamelCase , path_or_fileobj=str(__UpperCamelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] ): '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCAmelCase ( __UpperCamelCase : HfApi , __UpperCamelCase : int , __UpperCamelCase : Tuple ): '''simple docstring''' snake_case_ : Any = F'repo_zipped_img_data-{int(time.time() * 1_0E3 )}' snake_case_ : str = F'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" , private=__UpperCamelCase ) hf_api.upload_file( token=__UpperCamelCase , path_or_fileobj=str(__UpperCamelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Any ): '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
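# A sketch of how a test module would consume the fixtures above; pytest injects
# the repo id, and the test body below is illustrative, not part of this file:
def test_load_private_text_repo(hf_private_dataset_repo_txt_data):
    from datasets import load_dataset

    dataset = load_dataset(hf_private_dataset_repo_txt_data, split="train")
    assert dataset.num_rows > 0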
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
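# Worked example, verifiable by hand:
# $1000 at a 0.05% daily rate for 60 days -> 1000 * 0.0005 * 60 = $30 simple interest;
# $1000 compounded twice at 10% per period -> 1000 * (1.1**2 - 1) = $210.
if __name__ == "__main__":
    assert round(simple_interest(1000, 0.0005, 60), 2) == 30.0
    assert round(compound_interest(1000, 0.10, 2), 2) == 210.0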
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    # Build the sample tree 1 -> (2 -> (4, 5), 3).
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    return root


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    # Alternate between left-to-right and right-to-left traversal per level.
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
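# Hand-checked expectations for the sample tree built by make_tree():
if __name__ == "__main__":
    demo_root = make_tree()
    assert inorder(demo_root) == [4, 2, 5, 1, 3]
    assert preorder(demo_root) == [1, 2, 4, 5, 3]
    assert zigzag(demo_root) == [[1], [3, 2], [4, 5]]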
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter lowercase__ = "Create a default config file for Accelerate with only a few flags set." def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ): UpperCAmelCase : Any = Path(UpperCAmelCase_ ) path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) if path.exists(): print( F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False UpperCAmelCase : Optional[int] = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" ) UpperCAmelCase : Dict = { 'compute_environment': 'LOCAL_MACHINE', 'mixed_precision': mixed_precision, } if torch.cuda.is_available(): UpperCAmelCase : Dict = torch.cuda.device_count() UpperCAmelCase : List[Any] = num_gpus UpperCAmelCase : List[Any] = False if num_gpus > 1: UpperCAmelCase : Tuple = 'MULTI_GPU' else: UpperCAmelCase : Optional[Any] = 'NO' elif is_xpu_available() and use_xpu: UpperCAmelCase : Optional[int] = torch.xpu.device_count() UpperCAmelCase : Optional[int] = num_xpus UpperCAmelCase : Any = False if num_xpus > 1: UpperCAmelCase : Tuple = 'MULTI_XPU' else: UpperCAmelCase : str = 'NO' elif is_npu_available(): UpperCAmelCase : Optional[int] = torch.npu.device_count() UpperCAmelCase : str = num_npus UpperCAmelCase : int = False if num_npus > 1: UpperCAmelCase : int = 'MULTI_NPU' else: UpperCAmelCase : List[str] = 'NO' else: UpperCAmelCase : str = 0 UpperCAmelCase : int = True UpperCAmelCase : str = 1 UpperCAmelCase : str = 'NO' UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ ) config.to_json_file(UpperCAmelCase_ ) return path def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ ) parser.add_argument( '--config_file' , default=UpperCAmelCase_ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , dest='save_location' , ) parser.add_argument( '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. ' 'Choose between FP16 and BF16 (bfloat16) training. 
' 'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , ) parser.set_defaults(func=UpperCAmelCase_ ) return parser def UpperCamelCase( UpperCAmelCase_ ): UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F"""accelerate configuration saved at {config_file}""" )
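# A short usage sketch; `write_basic_config` is re-exported from Accelerate's utils
# and mirrors what `accelerate config default` does from the CLI:
#
#     from accelerate.utils import write_basic_config
#
#     write_basic_config(mixed_precision="fp16")  # writes default_config.yaml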
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class lowercase : def __init__( self : Tuple , _lowercase : Union[str, Any] , _lowercase : Any=13 , _lowercase : int=2 , _lowercase : Optional[int]=24 , _lowercase : Any=16 , _lowercase : Optional[Any]=True , _lowercase : Tuple=True , _lowercase : Optional[Any]=32 , _lowercase : Union[str, Any]=5 , _lowercase : int=4 , _lowercase : int=37 , _lowercase : Optional[Any]="gelu" , _lowercase : str=0.1 , _lowercase : List[str]=0.1 , _lowercase : str=10 , _lowercase : List[str]=0.02 , _lowercase : Dict=None , _lowercase : Union[str, Any]=2 , _lowercase : List[str]=2 , ): SCREAMING_SNAKE_CASE__ : List[str] = parent SCREAMING_SNAKE_CASE__ : str = batch_size SCREAMING_SNAKE_CASE__ : str = patch_size SCREAMING_SNAKE_CASE__ : Optional[Any] = max_length SCREAMING_SNAKE_CASE__ : List[str] = num_mel_bins SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_training SCREAMING_SNAKE_CASE__ : Dict = use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size SCREAMING_SNAKE_CASE__ : str = hidden_act SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : int = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE__ : Tuple = scope SCREAMING_SNAKE_CASE__ : int = frequency_stride SCREAMING_SNAKE_CASE__ : int = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) SCREAMING_SNAKE_CASE__ : Optional[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 SCREAMING_SNAKE_CASE__ : str = (self.max_length - self.patch_size) // self.time_stride + 1 SCREAMING_SNAKE_CASE__ : str = frequency_out_dimension * time_out_dimension SCREAMING_SNAKE_CASE__ : Optional[Any] = num_patches + 2 def lowercase__ ( self : int ): SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config() return config, input_values, labels def lowercase__ ( self : Union[str, Any] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def lowercase__ ( self : Tuple , _lowercase : str , _lowercase : int , _lowercase : Tuple ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = ASTModel(config=_lowercase ) model.to(_lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : int ): SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : int = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {'''input_values''': input_values} return config, inputs_dict @require_torch class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): lowerCamelCase : Optional[int] = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) lowerCamelCase : Tuple = ( {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel} if is_torch_available() else {} ) lowerCamelCase : str = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : int = False lowerCamelCase : List[str] = False def lowercase__ ( self : int , _lowercase : Dict , _lowercase : Optional[int] , _lowercase : int , _lowercase : Dict , _lowercase : int ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def lowercase__ ( self : Any ): SCREAMING_SNAKE_CASE__ : int = ASTModelTester(self ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 ) def lowercase__ ( self : Any ): self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''' ) def lowercase__ ( self : List[Any] ): pass def lowercase__ ( self : List[str] ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) ) def lowercase__ ( self : Tuple ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple = model_class(_lowercase ) SCREAMING_SNAKE_CASE__ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : Tuple = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[int] = ['''input_values'''] self.assertListEqual(arg_names[:1] , _lowercase ) def lowercase__ ( self : List[str] ): SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase ) @slow def lowercase__ ( self : Dict ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Tuple = ASTModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) def a ( ) -> Optional[int]: '''simple 
docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = torchaudio.load(A__ ) return audio, sampling_rate @require_torch @require_torchaudio class lowercase ( unittest.TestCase ): @cached_property def lowercase__ ( self : str ): return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ) if is_torchaudio_available() else None ) @slow def lowercase__ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ : str = self.default_feature_extractor SCREAMING_SNAKE_CASE__ : Dict = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(_lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_feature_extractor SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_audio() SCREAMING_SNAKE_CASE__ : Union[str, Any] = audio.squeeze().numpy() SCREAMING_SNAKE_CASE__ : int = feature_extractor(_lowercase , sampling_rate=_lowercase , return_tensors='''pt''' ).to(_lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Dict = model(**_lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size((1, 5_27) ) self.assertEqual(outputs.logits.shape , _lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
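# The patch arithmetic the AST tester above encodes, as a standalone check
# (illustrative helper name): patches tile the (max_length x num_mel_bins)
# spectrogram with the given strides, plus the [CLS] and distillation tokens.
def ast_seq_length(max_length, num_mel_bins, patch_size, frequency_stride, time_stride):
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out + 2


assert ast_seq_length(24, 16, 2, 2, 2) == 98  # the tester defaults above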
35
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy 0.5 * m * |v|**2 of a body of mass m moving at velocity v."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
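# Minimal usage sketch for kinetic_energy above (illustrative only; the
# expected values follow directly from the formula 0.5 * m * |v|**2):
# >>> kinetic_energy(10, 10)
# 500.0
# >>> kinetic_energy(2, -1)
# 1.0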
16
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : str = logging.get_logger(__name__) lowercase : int = { """microsoft/unispeech-sat-base-100h-libri-ft""": ( """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json""" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class A__ ( __UpperCAmelCase ): """simple docstring""" __A : Tuple = '''unispeech-sat''' def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1e-5 , lowercase="group" , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=128 , lowercase=16 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase=320 , lowercase=2 , lowercase=0.1 , lowercase=100 , lowercase=256 , lowercase=256 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=504 , **lowercase , ) -> List[Any]: '''simple docstring''' super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase) a__ : List[str] = hidden_size a__ : List[Any] = feat_extract_norm a__ : str = feat_extract_activation a__ : Optional[int] = list(lowercase) a__ : Any = list(lowercase) a__ : str = list(lowercase) a__ : List[Any] = conv_bias a__ : List[str] = num_conv_pos_embeddings a__ : Union[str, Any] = num_conv_pos_embedding_groups a__ : List[Any] = len(self.conv_dim) a__ : Optional[Any] = num_hidden_layers a__ : Tuple = intermediate_size a__ : Dict = hidden_act a__ : Dict = num_attention_heads a__ : Any = hidden_dropout a__ : Optional[int] = attention_dropout a__ : Tuple = activation_dropout a__ : Tuple = feat_proj_dropout a__ : Optional[Any] = final_dropout a__ : Tuple = layerdrop a__ : int = layer_norm_eps a__ : Optional[Any] = initializer_range a__ : Dict = vocab_size a__ : str = num_clusters a__ : Any = do_stable_layer_norm a__ : int = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 a__ : str = apply_spec_augment a__ : Optional[int] = mask_time_prob a__ : List[str] = mask_time_length a__ : List[str] = mask_time_min_masks a__ : Optional[int] = mask_feature_prob a__ : List[Any] = mask_feature_length a__ : Optional[int] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations a__ : List[str] = num_codevectors_per_group a__ : Any = num_codevector_groups a__ : Optional[Any] = contrastive_logits_temperature a__ : Optional[Any] = feat_quantizer_dropout a__ : Union[str, Any] = num_negatives a__ : Optional[int] = codevector_dim a__ : Tuple = proj_codevector_dim a__ : Optional[int] = diversity_loss_weight # ctc loss a__ : Optional[Any] = ctc_loss_reduction a__ : List[Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. a__ : Dict = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. a__ : int = list(lowercase) a__ : str = list(lowercase) a__ : List[str] = list(lowercase) a__ : List[str] = xvector_output_dim @property def __lowercase ( self) -> List[Any]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1)
710
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase : Any = logging.get_logger(__name__) def A_ ( A__ , A__ ) -> List[Any]: a__ : List[str] = set() a__ : Union[str, Any] = [] def parse_line(A__ ): for line in fp: if isinstance(A__ , A__ ): a__ : str = line.decode('UTF-8' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(' ' ): # process a single warning and move it to `selected_warnings`. if len(A__ ) > 0: a__ : Dict = '\n'.join(A__ ) # Only keep the warnings specified in `targets` if any(F': {x}: ' in warning for x in targets ): selected_warnings.add(A__ ) buffer.clear() continue else: a__ : int = line.strip() buffer.append(A__ ) if from_gh: for filename in os.listdir(A__ ): a__ : List[Any] = os.path.join(A__ , A__ ) if not os.path.isdir(A__ ): # read the file if filename != "warnings.txt": continue with open(A__ ) as fp: parse_line(A__ ) else: try: with zipfile.ZipFile(A__ ) as z: for filename in z.namelist(): if not os.path.isdir(A__ ): # read the file if filename != "warnings.txt": continue with z.open(A__ ) as fp: parse_line(A__ ) except Exception: logger.warning( F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def A_ ( A__ , A__ ) -> Dict: a__ : List[str] = set() a__ : Tuple = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if (p.endswith('.zip' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(A__ , A__ ) ) return selected_warnings if __name__ == "__main__": def A_ ( A__ ) -> Tuple: return values.split(',' ) lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase : List[Any] = parser.parse_args() lowercase : Union[str, Any] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 8_0) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase : Dict = extract_warnings(args.output_dir, args.targets) lowercase : Any = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: 
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
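# Example invocation of this script (the filename extract_warnings.py and the
# run id/token values are assumptions; the flags are the ones declared above):
#
#   python extract_warnings.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ./warnings_out \
#       --token "$GITHUB_TOKEN" \
#       --targets DeprecationWarning,UserWarning,FutureWarning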
392
0
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) UpperCAmelCase_ : Dict = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight")) rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias")) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight")) rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias")) rename_keys.append( (F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", F"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", F"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight")) rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias")) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight")) rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias")) rename_keys.append( (F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", 
F"decoder.layers.{i}.final_layer_norm.bias")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'), ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE : Tuple = val def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _SCREAMING_SNAKE_CASE : str = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = value else: _SCREAMING_SNAKE_CASE : int = value return new_state_dict def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = """""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : int = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Any = in_proj_weight[:256, :] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[:256] _SCREAMING_SNAKE_CASE : Dict = in_proj_weight[256:512, :] _SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[256:512] _SCREAMING_SNAKE_CASE : str = in_proj_weight[-256:, :] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Any = in_proj_weight[:256, :] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[:256] _SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[256:512, :] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[256:512] _SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[-256:, :] _SCREAMING_SNAKE_CASE : Dict = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention 
_SCREAMING_SNAKE_CASE : List[Any] = state_dict.pop( f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict _SCREAMING_SNAKE_CASE : Dict = in_proj_weight_cross_attn[:256, :] _SCREAMING_SNAKE_CASE : int = in_proj_bias_cross_attn[:256] _SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight_cross_attn[256:512, :] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias_cross_attn[256:512] _SCREAMING_SNAKE_CASE : int = in_proj_weight_cross_attn[-256:, :] _SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias_cross_attn[-256:] def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = image.size _SCREAMING_SNAKE_CASE : int = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE : Tuple = 800 if """detection""" in checkpoint_url else 1000 _SCREAMING_SNAKE_CASE : Dict = target_max_size / current_max_size _SCREAMING_SNAKE_CASE : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = F.to_tensor(SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE : int = F.normalize(SCREAMING_SNAKE_CASE__ , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ) return image @torch.no_grad() def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" logger.info("""Converting model...""" ) # load original state dict _SCREAMING_SNAKE_CASE : int = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" ) # rename keys for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE : Optional[Any] = rename_backbone_keys(SCREAMING_SNAKE_CASE__ ) # query, key and value matrices need special treatment read_in_q_k_v(SCREAMING_SNAKE_CASE__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _SCREAMING_SNAKE_CASE : Optional[int] = """model.""" for key in state_dict.copy().keys(): if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): _SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE : List[Any] = val # create HuggingFace model and load state dict _SCREAMING_SNAKE_CASE : List[str] = TableTransformerConfig( backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: _SCREAMING_SNAKE_CASE : Optional[Any] = 15 _SCREAMING_SNAKE_CASE : int = 2 _SCREAMING_SNAKE_CASE : List[Any] = {0: """table""", 1: """table rotated"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel _SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in idalabel.items()} else: _SCREAMING_SNAKE_CASE : Dict = 125 _SCREAMING_SNAKE_CASE : Optional[Any] = 6 _SCREAMING_SNAKE_CASE : Optional[int] = { 0: """table""", 1: """table column""", 2: """table row""", 3: """table column header""", 4: """table projected row header""", 5: """table spanning cell""", } 
_SCREAMING_SNAKE_CASE : List[Any] = idalabel _SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE : Tuple = DetrImageProcessor( format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 ) _SCREAMING_SNAKE_CASE : str = TableTransformerForObjectDetection(SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) model.eval() # verify our conversion _SCREAMING_SNAKE_CASE : int = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png""" _SCREAMING_SNAKE_CASE : Optional[Any] = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE : int = Image.open(SCREAMING_SNAKE_CASE__ ).convert("""RGB""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = normalize(resize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ).unsqueeze(0 ) _SCREAMING_SNAKE_CASE : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ) if "detection" in checkpoint_url: _SCREAMING_SNAKE_CASE : Dict = (1, 15, 3) _SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] ) _SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] ) else: _SCREAMING_SNAKE_CASE : Optional[int] = (1, 125, 7) _SCREAMING_SNAKE_CASE : List[Any] = torch.tensor( [[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] ) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: # Push model to HF hub logger.info("""Pushing model to the hub...""" ) _SCREAMING_SNAKE_CASE : List[Any] = ( """microsoft/table-transformer-detection""" if """detection""" in checkpoint_url else """microsoft/table-transformer-structure-recognition""" ) model.push_to_hub(SCREAMING_SNAKE_CASE__ ) image_processor.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ : List[Any] = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', type=str, choices=[ 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth', ], help='URL of the Table Transformer checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' 
) UpperCAmelCase_ : List[str] = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
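# Example invocation (the script filename is an assumption; the checkpoint URL
# is one of the two choices registered in the argument parser above):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection \
#       --push_to_hub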
533
import math


def is_prime(number: int) -> bool:
    """Check primality by trial division over odd numbers up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Search from factor * value for the next prime; pass desc=True to search
    downwards. If the starting product is itself prime, restart one above it."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
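# Minimal usage sketch (results follow from the definitions above):
# >>> is_prime(13)
# True
# >>> is_prime(14)
# False
# >>> next_prime(14)
# 17
# >>> next_prime(13)  # 13 is already prime, so the search restarts at 14
# 17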
533
1
import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = DownBlockaD # noqa F405 lowercase_ = """down""" def __UpperCamelCase ( self ): '''simple docstring''' __A =[-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = ResnetDownsampleBlockaD # noqa F405 lowercase_ = """down""" def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = AttnDownBlockaD # noqa F405 lowercase_ = """down""" def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = CrossAttnDownBlockaD # noqa F405 lowercase_ = """down""" def __UpperCamelCase ( self ): '''simple docstring''' __A , __A =super().prepare_init_args_and_inputs_for_common() __A =3_2 return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = SimpleCrossAttnDownBlockaD # noqa F405 lowercase_ = """down""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A , __A =super().prepare_init_args_and_inputs_for_common() __A =3_2 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = SkipDownBlockaD # noqa F405 lowercase_ = """down""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =[-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = AttnSkipDownBlockaD # noqa F405 lowercase_ = """down""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = DownEncoderBlockaD # noqa F405 lowercase_ = """down""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_temb=lowercase__ ) def __UpperCamelCase ( self ): '''simple 
docstring''' __A ={ '''in_channels''': 3_2, '''out_channels''': 3_2, } __A =self.dummy_input return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = AttnDownEncoderBlockaD # noqa F405 lowercase_ = """down""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_temb=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A ={ '''in_channels''': 3_2, '''out_channels''': 3_2, } __A =self.dummy_input return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = UNetMidBlockaD # noqa F405 lowercase_ = """mid""" def __UpperCamelCase ( self ): '''simple docstring''' __A ={ '''in_channels''': 3_2, '''temb_channels''': 1_2_8, } __A =self.dummy_input return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = UNetMidBlockaDCrossAttn # noqa F405 lowercase_ = """mid""" def __UpperCamelCase ( self ): '''simple docstring''' __A , __A =super().prepare_init_args_and_inputs_for_common() __A =3_2 return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = UNetMidBlockaDSimpleCrossAttn # noqa F405 lowercase_ = """mid""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A , __A =super().prepare_init_args_and_inputs_for_common() __A =3_2 return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = UpBlockaD # noqa F405 lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =[-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = ResnetUpsampleBlockaD # noqa F405 lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = CrossAttnUpBlockaD # noqa F405 
lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A , __A =super().prepare_init_args_and_inputs_for_common() __A =3_2 return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = SimpleCrossAttnUpBlockaD # noqa F405 lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=lowercase__ , include_encoder_hidden_states=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A , __A =super().prepare_init_args_and_inputs_for_common() __A =3_2 return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = AttnUpBlockaD # noqa F405 lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=lowercase__ ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = SkipUpBlockaD # noqa F405 lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =[-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = AttnSkipUpBlockaD # noqa F405 lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = UpDecoderBlockaD # noqa F405 lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_temb=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A ={'''in_channels''': 3_2, '''out_channels''': 3_2} __A =self.dummy_input return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(lowercase__ ) class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowercase_ = AttnUpDecoderBlockaD # noqa F405 lowercase_ = """up""" @property def __UpperCamelCase ( self ): '''simple docstring''' return super().get_dummy_input(include_temb=lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A 
={'''in_channels''': 3_2, '''out_channels''': 3_2} __A =self.dummy_input return init_dict, inputs_dict def __UpperCamelCase ( self ): '''simple docstring''' __A =[0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(lowercase__ )
516
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowercase_ = ViTImageProcessor if is_vision_available() else None @property def __UpperCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self ): '''simple docstring''' __A =(3, 3_2, 1_2_8) __A =tempfile.mkdtemp() # fmt: off __A =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __A =dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) __A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase__ ) + '''\n''' ) __A ={ '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 3_2, '''width''': 1_2_8}, } __A =os.path.join(self.tmpdirname , lowercase__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(lowercase__ , lowercase__ ) def __UpperCamelCase ( self , **lowercase__ ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def __UpperCamelCase ( self , **lowercase__ ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self ): '''simple docstring''' __A =np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta ) __A =Image.fromarray(np.moveaxis(lowercase__ , 0 , -1 ) ) return image_input def __UpperCamelCase ( self ): '''simple docstring''' __A =self.get_tokenizer() __A =self.get_image_processor() __A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ ) processor.save_pretrained(self.tmpdirname ) __A =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase__ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , lowercase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.get_tokenizer() __A =self.get_image_processor() __A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ ) processor.save_pretrained(self.tmpdirname ) __A =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __A =self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 ) __A =MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , 
eos_token='''(EOS)''' , do_normalize=lowercase__ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , lowercase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.get_image_processor() __A =self.get_tokenizer() __A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ ) __A =self.prepare_image_inputs() __A =image_processor(lowercase__ , return_tensors='''np''' ) __A =processor(images=lowercase__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.get_image_processor() __A =self.get_tokenizer() __A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ ) __A ='''test''' __A =processor(text=lowercase__ ) __A =tokenizer(lowercase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.get_image_processor() __A =self.get_tokenizer() __A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ ) __A ='''test''' __A =self.prepare_image_inputs() __A =processor(text=lowercase__ , images=lowercase__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(lowercase__ ): processor() def __UpperCamelCase ( self ): '''simple docstring''' __A =self.get_image_processor() __A =self.get_tokenizer() __A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ ) __A =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __A =processor.char_decode(lowercase__ ) __A =tokenizer.batch_decode(lowercase__ ) __A =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok] self.assertListEqual(lowercase__ , lowercase__ ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.get_image_processor() __A =self.get_tokenizer() __A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ ) __A =None __A =self.prepare_image_inputs() __A =processor(text=lowercase__ , images=lowercase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __UpperCamelCase ( self ): '''simple docstring''' __A =self.get_image_processor() __A =self.get_tokenizer() __A =MgpstrProcessor(tokenizer=lowercase__ , image_processor=lowercase__ ) __A =torch.randn(1 , 2_7 , 3_8 ) __A =torch.randn(1 , 2_7 , 5_0_2_5_7 ) __A =torch.randn(1 , 2_7 , 3_0_5_2_2 ) __A =processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
516
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Any = 'xmod' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : List[Any]=30522 , __SCREAMING_SNAKE_CASE : List[Any]=768 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Tuple=3072 , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=1e-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Any="absolute" , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Any=("en_XX",) , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Dict , ) -> Optional[Any]: super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =vocab_size __UpperCAmelCase =hidden_size __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =hidden_act __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =max_position_embeddings __UpperCAmelCase =type_vocab_size __UpperCAmelCase =initializer_range __UpperCAmelCase =layer_norm_eps __UpperCAmelCase =position_embedding_type __UpperCAmelCase =use_cache __UpperCAmelCase =classifier_dropout __UpperCAmelCase =pre_norm __UpperCAmelCase =adapter_reduction_factor __UpperCAmelCase =adapter_layer_norm __UpperCAmelCase =adapter_reuse_layer_norm __UpperCAmelCase =ln_before_adapter __UpperCAmelCase =list(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =default_language class _A ( UpperCamelCase ): """simple docstring""" @property def _a ( self : int ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __UpperCAmelCase ={0: """batch""", 1: """choice""", 2: """sequence"""} else: 
__UpperCAmelCase ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
68
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
512
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE = { "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE = [ "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MegatronBertForCausalLM", "MegatronBertForMaskedLM", "MegatronBertForMultipleChoice", "MegatronBertForNextSentencePrediction", "MegatronBertForPreTraining", "MegatronBertForQuestionAnswering", "MegatronBertForSequenceClassification", "MegatronBertForTokenClassification", "MegatronBertModel", "MegatronBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
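# Illustrative sketch of what the `_LazyModule` indirection above buys
# (behavior as I understand it — attribute access resolves submodules on
# first use, so the bare import alone never pulls in torch-backed modeling code):
# >>> import transformers.models.megatron_bert as megatron_bert  # cheap
# >>> megatron_bert.MegatronBertModel  # first access imports modeling_megatron_bert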
23
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary digits."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input string and return its binary form, e.g. '0b101'."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
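# Minimal usage sketch (outputs follow from the recursion above):
# >>> main("104")
# '0b1101000'
# >>> main("-37")
# '-0b100101'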
23
1
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , __lowerCAmelCase , ) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :str = RobertaConfig lowerCamelCase :int = '''roberta''' def __init__( self , lowerCAmelCase_ ) -> Dict: super().__init__(lowerCAmelCase_ ) _A = RobertaEmbeddings(lowerCAmelCase_ ) self.init_weights() @add_start_docstrings( '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. ''' , __lowerCAmelCase , ) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Tuple = RobertaConfig lowerCamelCase :List[str] = '''roberta''' def __init__( self , lowerCAmelCase_ ) -> List[Any]: super().__init__(lowerCAmelCase_ ) _A = config.num_labels _A = config.num_hidden_layers _A = DeeRobertaModel(lowerCAmelCase_ ) _A = nn.Dropout(config.hidden_dropout_prob ) _A = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=-1 , lowerCAmelCase_=False , ) -> int: _A = self.num_layers try: _A = self.roberta( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ , ) _A = outputs[1] _A = self.dropout(lowerCAmelCase_ ) _A = self.classifier(lowerCAmelCase_ ) _A = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A = e.message _A = e.exit_layer _A = outputs[0] if not self.training: _A = entropy(lowerCAmelCase_ ) _A = [] _A = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A = MSELoss() _A = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A = CrossEntropyLoss() _A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A = [] for highway_exit in outputs[-1]: _A = highway_exit[0] if not self.training: highway_logits_all.append(lowerCAmelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A = MSELoss() _A = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A = CrossEntropyLoss() _A = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(lowerCAmelCase_ ) if train_highway: _A = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A = (loss,) + outputs if not self.training: _A = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
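# For reference, a minimal sketch of the `entropy` helper imported above from
# modeling_highway_bert (its exact form here is an assumption; the intent is
# the entropy of a pre-softmax logit tensor, which drives the early-exit rule):
import torch


def entropy_sketch(logits: torch.Tensor) -> torch.Tensor:
    # H(softmax(x)) = log(sum_i exp(x_i)) - sum_i x_i * exp(x_i) / sum_i exp(x_i)
    exp_x = torch.exp(logits)
    a = torch.sum(exp_x, dim=-1)
    b = torch.sum(logits * exp_x, dim=-1)
    return torch.log(a) - b / a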
401
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
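# A minimal standalone sketch of the inference path exercised by
# ConvNextModelIntegrationTest above. The checkpoint and image path are the ones
# the test itself uses; running this assumes network access and the test fixture
# being present locally.
import torch
from PIL import Image

from transformers import AutoImageProcessor, ConvNextForImageClassification

image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes

# map the argmax logit back to a human-readable label
print(model.config.id2label[logits.argmax(-1).item()])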
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
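# A hedged sketch of the data flow above, useful as a smoke test. The record
# fields mirror exactly what the parser reads ("question" plus the "title" of
# each entry in "positive_ctxs"); the sample values and output file names are
# illustrative, not part of the DPR dataset.
import json

sample_records = [
    {
        "question": "who wrote the song photograph by ringo starr",
        "positive_ctxs": [{"title": "Photograph (Ringo Starr song)"}],
    }
]
with open("biencoder-nq-dev.json", "w") as f:
    json.dump(sample_records, f)

# Assuming the script above is saved as parse_dpr_data.py (name illustrative):
#   python parse_dpr_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.txt --gold_data_path gold.tsv
# writes one question per line to eval.txt and the tab-joined positive context
# titles to gold.tsv, one line per record.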
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection of each Swin block into separate
            # query/key/value tensors for the HuggingFace implementation
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")

    original_tokens = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, original_tokens, None).logits
    logits = model(pixel_values, decoder_input_ids=original_tokens).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
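# A hedged usage sketch for a converted checkpoint, assuming the conversion
# above was run with --pytorch_dump_folder_path ./donut-docvqa (the folder
# name is illustrative) and using the DocVQA prompt format shown in
# convert_donut_checkpoint. Not part of the conversion script itself.
import torch
from datasets import load_dataset

from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("./donut-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("./donut-docvqa").eval()

# reuse the same sample document the conversion script verifies against
dataset = load_dataset("hf-internal-testing/example-documents")
image = dataset["test"][0]["image"].convert("RGB")

task_prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids
pixel_values = processor(image, return_tensors="pt").pixel_values

with torch.no_grad():
    outputs = model.generate(
        pixel_values,
        decoder_input_ids=decoder_input_ids,
        max_length=model.config.decoder.max_position_embeddings,
        pad_token_id=processor.tokenizer.pad_token_id,
        eos_token_id=processor.tokenizer.eos_token_id,
        use_cache=True,
    )

sequence = processor.batch_decode(outputs)[0]
# token2json turns the generated tag sequence into a dict, e.g. {"answer": ...}
print(processor.token2json(sequence))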